Diffstat (limited to 'arch/powerpc/include/asm/user.h')
-rw-r--r-- arch/powerpc/include/asm/user.h | 51
1 file changed, 51 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
new file mode 100644
index 00000000000..3fd4545dd74
--- /dev/null
+++ b/arch/powerpc/include/asm/user.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_USER_H
+#define _ASM_POWERPC_USER_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Adapted from <asm-alpha/user.h>
+ *
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
+ * are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused, we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ */
+struct user {
+ struct pt_regs regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+#endif /* _ASM_POWERPC_USER_H */
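For illustration, a minimal stand-alone sketch (not part of the file above) of how a core-file reader might use these fields and macros to locate the text, data and stack ranges of a dump. The struct subset, the 4 KiB page size and the sample addresses are assumptions made only for the example.

/*
 * Illustrative sketch only: interpret the struct user fields and the
 * NBPG / HOST_* macros above.  struct user_layout mirrors just the
 * fields used here; real values would come from the upage of a core.
 */
#include <stdio.h>

struct user_layout {			/* subset of struct user */
	unsigned long start_code;	/* text starting address */
	unsigned long start_data;	/* data starting address */
	unsigned long start_stack;	/* stack starting address */
	unsigned long u_tsize;		/* text size (pages) */
	unsigned long u_dsize;		/* data size (pages) */
	unsigned long u_ssize;		/* stack size (pages) */
};

#define NBPG 4096UL			/* assumes PAGE_SIZE == 4 KiB */

int main(void)
{
	struct user_layout u = {
		.start_code  = 0x10000000UL, .u_tsize = 4,
		.start_data  = 0x10010000UL, .u_dsize = 8,
		.start_stack = 0xbf000000UL, .u_ssize = 16,
	};

	/* HOST_TEXT_START_ADDR .. text start + text size */
	printf("text : 0x%lx-0x%lx\n", u.start_code,
	       u.start_code + u.u_tsize * NBPG);
	/* HOST_DATA_START_ADDR .. data start + data size */
	printf("data : 0x%lx-0x%lx\n", u.start_data,
	       u.start_data + u.u_dsize * NBPG);
	/* HOST_STACK_END_ADDR = start_stack + u_ssize * NBPG */
	printf("stack: 0x%lx-0x%lx\n", u.start_stack,
	       u.start_stack + u.u_ssize * NBPG);
	return 0;
}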
-rw-r--r-- drivers/crypto/caam/Makefile | 7
-rw-r--r-- drivers/crypto/caam/caamalg.c | 702
-rw-r--r-- drivers/crypto/caam/caamhash.c | 196
-rw-r--r-- drivers/crypto/caam/caamrng.c | 49
-rw-r--r-- drivers/crypto/caam/compat.h | 2
-rw-r--r-- drivers/crypto/caam/ctrl.c | 492
-rw-r--r-- drivers/crypto/caam/ctrl.h | 2
-rw-r--r-- drivers/crypto/caam/desc.h | 39
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 109
-rw-r--r-- drivers/crypto/caam/error.c | 391
-rw-r--r-- drivers/crypto/caam/error.h | 2
-rw-r--r-- drivers/crypto/caam/intern.h | 24
-rw-r--r-- drivers/crypto/caam/jr.c | 383
-rw-r--r-- drivers/crypto/caam/jr.h | 7
-rw-r--r-- drivers/crypto/caam/key_gen.c | 15
-rw-r--r-- drivers/crypto/caam/key_gen.h | 2
-rw-r--r-- drivers/crypto/caam/pdb.h | 1
-rw-r--r-- drivers/crypto/caam/regs.h | 72
-rw-r--r-- drivers/crypto/caam/sg_sw_sec4.h | 34
-rw-r--r-- drivers/crypto/ccp/Kconfig | 24
-rw-r--r-- drivers/crypto/ccp/Makefile | 10
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 365
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 279
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 369
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-main.c | 388
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-sha.c | 437
-rw-r--r-- drivers/crypto/ccp/ccp-crypto.h | 197
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 608
-rw-r--r-- drivers/crypto/ccp/ccp-dev.h | 272
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 2126
-rw-r--r-- drivers/crypto/ccp/ccp-pci.c | 360
-rw-r--r-- drivers/crypto/geode-aes.c | 36
-rw-r--r-- drivers/crypto/geode-aes.h | 6
-rw-r--r-- drivers/crypto/hifn_795x.c | 10
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 84
-rw-r--r-- drivers/crypto/mv_cesa.c | 23
-rw-r--r-- drivers/crypto/mxs-dcp.c | 1103
-rw-r--r-- drivers/crypto/n2_core.c | 50
-rw-r--r-- drivers/crypto/nx/nx-842.c | 60
-rw-r--r-- drivers/crypto/nx/nx-aes-cbc.c | 58
-rw-r--r-- drivers/crypto/nx/nx-aes-ccm.c | 283
-rw-r--r-- drivers/crypto/nx/nx-aes-ctr.c | 52
-rw-r--r-- drivers/crypto/nx/nx-aes-ecb.c | 51
-rw-r--r-- drivers/crypto/nx/nx-aes-gcm.c | 298
-rw-r--r-- drivers/crypto/nx/nx-aes-xcbc.c | 205
-rw-r--r-- drivers/crypto/nx/nx-sha256.c | 128
-rw-r--r-- drivers/crypto/nx/nx-sha512.c | 134
-rw-r--r-- drivers/crypto/nx/nx.c | 77
-rw-r--r-- drivers/crypto/nx/nx.h | 3
-rw-r--r-- drivers/crypto/omap-aes.c | 1113
-rw-r--r-- drivers/crypto/omap-des.c | 1235
-rw-r--r-- drivers/crypto/omap-sham.c | 1235
-rw-r--r-- drivers/crypto/padlock-sha.c | 2
-rw-r--r-- drivers/crypto/picoxcell_crypto.c | 61
-rw-r--r-- drivers/crypto/s5p-sss.c | 167
-rw-r--r-- drivers/crypto/sahara.c | 1058
-rw-r--r-- drivers/crypto/talitos.c | 225
-rw-r--r-- drivers/crypto/tegra-aes.c | 1095
-rw-r--r-- drivers/crypto/tegra-aes.h | 103
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.c | 6
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.h | 7
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 71
-rw-r--r-- drivers/crypto/ux500/hash/hash_alg.h | 5
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 648
76 files changed, 15449 insertions, 4377 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 308c7fb92a6..02f177aeb16 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -224,7 +224,7 @@ config CRYPTO_DEV_TALITOS
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
@@ -242,22 +242,37 @@ config CRYPTO_DEV_PPC4XX
This option allows you to have support for AMCC crypto acceleration.
config CRYPTO_DEV_OMAP_SHAM
- tristate "Support for OMAP SHA1/MD5 hw accelerator"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
+ depends on ARCH_OMAP2PLUS
select CRYPTO_SHA1
select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
help
- OMAP processors have SHA1/MD5 hw accelerator. Select this if you
- want to use the OMAP module for SHA1/MD5 algorithms.
+ OMAP processors have an MD5/SHA1/SHA2 hw accelerator. Select this if you
+ want to use the OMAP module for MD5/SHA1/SHA2 algorithms.
config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
+config CRYPTO_DEV_OMAP_DES
+ tristate "Support for OMAP DES3DES hw engine"
+ depends on ARCH_OMAP2PLUS
+ select CRYPTO_DES
+ select CRYPTO_BLKCIPHER2
+ help
+ OMAP processors have a DES/3DES module accelerator. Select this if you
+ want to use the OMAP module for DES and 3DES algorithms. Currently
+ the ECB and CBC modes of operation are supported by the driver.
+ Accesses made on unaligned boundaries are also supported.
+
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on ARCH_PICOXCELL && HAVE_CLK
@@ -275,31 +290,30 @@ config CRYPTO_DEV_PICOXCELL
Saying m here will build a module named picoxcell_crypto.
+config CRYPTO_DEV_SAHARA
+ tristate "Support for SAHARA crypto accelerator"
+ depends on ARCH_MXC && OF
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ help
+ This option enables support for the SAHARA HW crypto accelerator
+ found in some Freescale i.MX chips.
+
config CRYPTO_DEV_S5P
- tristate "Support for Samsung S5PV210 crypto accelerator"
- depends on ARCH_S5PV210
+ tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
This option allows you to have support for S5P crypto acceleration.
- Select this to offload Samsung S5PV210 or S5PC110 from AES
+ Select this to offload Samsung S5PV210, S5PC110 or Exynos from AES
algorithms execution.
-config CRYPTO_DEV_TEGRA_AES
- tristate "Support for TEGRA AES hw engine"
- depends on ARCH_TEGRA
- select CRYPTO_AES
- help
- TEGRA processors have AES module accelerator. Select this if you
- want to use the TEGRA module for AES algorithms.
-
- To compile this driver as a module, choose M here: the module
- will be called tegra-aes.
-
config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
- depends on PPC64 && IBMVIO
+ depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN
default n
help
Support for Power7+ in-Nest cryptographic acceleration.
@@ -360,17 +374,48 @@ config CRYPTO_DEV_ATMEL_TDES
will be called atmel-tdes.
config CRYPTO_DEV_ATMEL_SHA
- tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+ tristate "Support for Atmel SHA hw accelerator"
depends on ARCH_AT91
select CRYPTO_SHA1
select CRYPTO_SHA256
+ select CRYPTO_SHA512
select CRYPTO_ALGAPI
help
- Some Atmel processors have SHA1/SHA256 hw accelerator.
+ Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512
+ hw accelerator.
Select this if you want to use the Atmel module for
- SHA1/SHA256 algorithms.
+ SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_CCP
+ bool "Support for AMD Cryptographic Coprocessor"
+ depends on X86 && PCI
+ default n
+ help
+ The AMD Cryptographic Coprocessor provides hardware support
+ for encryption, hashing and related operations.
+
+if CRYPTO_DEV_CCP
+ source "drivers/crypto/ccp/Kconfig"
+endif
+
+config CRYPTO_DEV_MXS_DCP
+ tristate "Support for Freescale MXS DCP"
+ depends on ARCH_MXS
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ help
+ The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB
+ co-processors on the die.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mxs-dcp.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 880a47b0b02..482f090d16d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,22 +1,25 @@
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
-obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
+obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-y := n2_core.o n2_asm.o
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
-obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
-obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
-obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a33243c17b0..4afca396877 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -32,10 +32,10 @@
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
-void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
- u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
- u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
- u32 dir)
+static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+ u32 save_iv, u32 ld_h, u32 ld_iv,
+ u32 hdr_proc, u32 h, u32 c, u32 pad_type,
+ u32 op_grp, u32 op, u32 dir)
{
sa->sa_command_0.w = 0;
sa->sa_command_0.bf.save_hash_state = save_h;
@@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
sa->sa_command_0.bf.dir = dir;
}
-void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
- u32 cfb, u32 esn, u32 sn_mask, u32 mute,
- u32 cp_pad, u32 cp_pay, u32 cp_hdr)
+static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+ u32 hmac_mc, u32 cfb, u32 esn,
+ u32 sn_mask, u32 mute, u32 cp_pad,
+ u32 cp_pay, u32 cp_hdr)
{
sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index f88e3d8f6b6..37f9cc98ba1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -27,6 +27,9 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
@@ -721,7 +724,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- dev_set_drvdata(core_dev->device, NULL);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 8061336e07e..a083474991a 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -38,7 +39,8 @@
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/atmel-aes.h>
+#include <linux/platform_data/crypto-atmel.h>
+#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
@@ -47,7 +49,7 @@
#define CFB64_BLOCK_SIZE 8
/* AES flags */
-#define AES_FLAGS_MODE_MASK 0x01ff
+#define AES_FLAGS_MODE_MASK 0x03ff
#define AES_FLAGS_ENCRYPT BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CFB BIT(2)
@@ -55,21 +57,26 @@
#define AES_FLAGS_CFB16 BIT(4)
#define AES_FLAGS_CFB32 BIT(5)
#define AES_FLAGS_CFB64 BIT(6)
-#define AES_FLAGS_OFB BIT(7)
-#define AES_FLAGS_CTR BIT(8)
+#define AES_FLAGS_CFB128 BIT(7)
+#define AES_FLAGS_OFB BIT(8)
+#define AES_FLAGS_CTR BIT(9)
#define AES_FLAGS_INIT BIT(16)
#define AES_FLAGS_DMA BIT(17)
#define AES_FLAGS_BUSY BIT(18)
+#define AES_FLAGS_FAST BIT(19)
-#define AES_FLAGS_DUALBUFF BIT(24)
-
-#define ATMEL_AES_QUEUE_LENGTH 1
-#define ATMEL_AES_CACHE_SIZE 0
+#define ATMEL_AES_QUEUE_LENGTH 50
#define ATMEL_AES_DMA_THRESHOLD 16
+struct atmel_aes_caps {
+ bool has_dualbuff;
+ bool has_cfb64;
+ u32 max_burst_size;
+};
+
struct atmel_aes_dev;
struct atmel_aes_ctx {
@@ -77,6 +84,8 @@ struct atmel_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+
+ u16 block_size;
};
struct atmel_aes_reqctx {
@@ -112,20 +121,27 @@ struct atmel_aes_dev {
struct scatterlist *in_sg;
unsigned int nb_in_sg;
-
+ size_t in_offset;
struct scatterlist *out_sg;
unsigned int nb_out_sg;
+ size_t out_offset;
size_t bufcnt;
+ size_t buflen;
+ size_t dma_size;
- u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_in;
+ void *buf_in;
+ int dma_in;
+ dma_addr_t dma_addr_in;
struct atmel_aes_dma dma_lch_in;
- u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_out;
+ void *buf_out;
+ int dma_out;
+ dma_addr_t dma_addr_out;
struct atmel_aes_dma dma_lch_out;
+ struct atmel_aes_caps caps;
+
u32 hw_version;
};
@@ -165,6 +181,37 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req,
return sg_nb;
}
+static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
+ void *buf, size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
return readl_relaxed(dd->io_base + offset);
@@ -190,14 +237,6 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
atmel_aes_write(dd, offset, *value);
}
-static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
-{
- atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
-
- if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
- dd->flags |= AES_FLAGS_DUALBUFF;
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -225,7 +264,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (!(dd->flags & AES_FLAGS_INIT)) {
atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_dualbuff_test(dd);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
dd->flags |= AES_FLAGS_INIT;
dd->err = 0;
}
@@ -233,11 +272,19 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
return 0;
}
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+ return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
atmel_aes_hw_init(dd);
- dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+ dd->hw_version = atmel_aes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
}
@@ -260,50 +307,77 @@ static void atmel_aes_dma_callback(void *data)
tasklet_schedule(&dd->done_task);
}
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
+ dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
+ struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
- int nb_dma_sg_in, nb_dma_sg_out;
- dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
- if (!dd->nb_in_sg)
- goto exit_err;
+ dd->dma_size = length;
- nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
- if (!nb_dma_sg_in)
- goto exit_err;
+ if (!(dd->flags & AES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
- in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
- nb_dma_sg_in, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (dd->flags & AES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & AES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
- if (!in_desc)
- goto unmap_in;
+ if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
+ AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ }
- /* callback not needed */
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
- dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_out_sg)
- goto unmap_in;
+ dd->flags |= AES_FLAGS_DMA;
- nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
- if (!nb_dma_sg_out)
- goto unmap_out;
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
- out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
- nb_dma_sg_out, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!out_desc)
- goto unmap_out;
+ return -EINVAL;
out_desc->callback = atmel_aes_dma_callback;
out_desc->callback_param = dd;
- dd->total -= dd->req->nbytes;
-
dmaengine_submit(out_desc);
dma_async_issue_pending(dd->dma_lch_out.chan);
@@ -311,15 +385,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
dma_async_issue_pending(dd->dma_lch_in.chan);
return 0;
-
-unmap_out:
- dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
-unmap_in:
- dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
-exit_err:
- return -EINVAL;
}
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
@@ -332,7 +397,7 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
return -EINVAL;
dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_in_sg)
+ if (!dd->nb_out_sg)
return -EINVAL;
dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
@@ -352,30 +417,66 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
- int err;
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+
+ if ((!dd->in_offset) && (!dd->out_offset)) {
+ /* check for alignment */
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+ fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
+ }
+
+
+ if (fast) {
+ count = min(dd->total, sg_dma_len(dd->in_sg));
+ count = min(count, sg_dma_len(dd->out_sg));
+
+ err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->out_sg, 1,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg, 1,
+ DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(dd->in_sg);
+ addr_out = sg_dma_address(dd->out_sg);
+
+ dd->flags |= AES_FLAGS_FAST;
- if (dd->flags & AES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & AES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
} else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ /* use cache buffers */
+ count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
+ dd->buf_in, dd->buflen, dd->total, 0);
+
+ addr_in = dd->dma_addr_in;
+ addr_out = dd->dma_addr_out;
+
+ dd->flags &= ~AES_FLAGS_FAST;
}
- dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
- dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+ dd->total -= count;
- dd->flags |= AES_FLAGS_DMA;
- err = atmel_aes_crypt_dma(dd);
+ err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+
+ if (err && (dd->flags & AES_FLAGS_FAST)) {
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ }
return err;
}
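For illustration, the fast-path selection above boils down to the predicate sketched below (stand-alone, not driver code); IS_ALIGNED is redefined locally, sizeof(unsigned int) stands in for sizeof(u32), and the sample values are arbitrary. When the predicate fails, the driver falls back to copying through its pre-mapped bounce buffers.

/*
 * Illustrative sketch only: when can the driver DMA straight from/to
 * the request scatterlist ("fast" path) instead of bouncing the data?
 */
#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

static bool can_use_fast_path(unsigned int in_offset, unsigned int in_len,
			      unsigned int out_offset, unsigned int out_len,
			      unsigned int block_size)
{
	bool in_ok  = IS_ALIGNED(in_offset, sizeof(unsigned int)) &&
		      IS_ALIGNED(in_len, block_size);
	bool out_ok = IS_ALIGNED(out_offset, sizeof(unsigned int)) &&
		      IS_ALIGNED(out_len, block_size);

	/* both sides aligned and of equal length, as in the driver */
	return in_ok && out_ok && in_len == out_len;
}

int main(void)
{
	printf("%d\n", can_use_fast_path(0, 256, 0, 256, 16));	/* 1: aligned        */
	printf("%d\n", can_use_fast_path(3, 256, 0, 256, 16));	/* 0: bad offset     */
	printf("%d\n", can_use_fast_path(0, 250, 0, 250, 16));	/* 0: bad length     */
	return 0;
}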
@@ -410,6 +511,8 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
valmr |= AES_MR_CFBS_32b;
else if (dd->flags & AES_FLAGS_CFB64)
valmr |= AES_MR_CFBS_64b;
+ else if (dd->flags & AES_FLAGS_CFB128)
+ valmr |= AES_MR_CFBS_128b;
} else if (dd->flags & AES_FLAGS_OFB) {
valmr |= AES_MR_OPMOD_OFB;
} else if (dd->flags & AES_FLAGS_CTR) {
@@ -423,7 +526,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
valmr |= AES_MR_SMOD_IDATAR0;
- if (dd->flags & AES_FLAGS_DUALBUFF)
+ if (dd->caps.has_dualbuff)
valmr |= AES_MR_DUALBUFF;
} else {
valmr |= AES_MR_SMOD_AUTO;
@@ -477,7 +580,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
/* assign new request to device */
dd->req = req;
dd->total = req->nbytes;
+ dd->in_offset = 0;
dd->in_sg = req->src;
+ dd->out_offset = 0;
dd->out_sg = req->dst;
rctx = ablkcipher_request_ctx(req);
@@ -506,18 +611,86 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
int err = -EINVAL;
+ size_t count;
if (dd->flags & AES_FLAGS_DMA) {
- dma_unmap_sg(dd->dev, dd->out_sg,
- dd->nb_out_sg, DMA_FROM_DEVICE);
- dma_unmap_sg(dd->dev, dd->in_sg,
- dd->nb_in_sg, DMA_TO_DEVICE);
err = 0;
+ if (dd->flags & AES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
+ }
+
+ return err;
+}
+
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+ int err = -ENOMEM;
+
+ dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buflen = PAGE_SIZE;
+ dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ if (!dd->buf_in || !dd->buf_out) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ goto err_alloc;
+ }
+
+ /* MAP here */
+ dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+ dd->buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_in;
+ }
+
+ dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+ dd->buflen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_out;
}
+ return 0;
+
+err_map_out:
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+err_map_in:
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+err_alloc:
+ if (err)
+ pr_err("error: %d\n", err);
return err;
}
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+ dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+}
+
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -525,9 +698,36 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct atmel_aes_dev *dd;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
+ if (mode & AES_FLAGS_CFB8) {
+ if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB8 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB8_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB16) {
+ if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB16 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB16_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB32) {
+ if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB32 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB64) {
+ if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB64 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB64_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = AES_BLOCK_SIZE;
}
dd = atmel_aes_find_dev(ctx);
@@ -551,55 +751,54 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
}
}
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+ struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- struct aes_platform_data *pdata;
- dma_cap_mask_t mask_in, mask_out;
-
- pdata = dd->dev->platform_data;
-
- if (pdata && pdata->dma_slave->txdata.dma_dev &&
- pdata->dma_slave->rxdata.dma_dev) {
-
- /* Try to grab 2 DMA channels */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_aes_filter, &pdata->dma_slave->rxdata);
- if (!dd->dma_lch_in.chan)
- goto err_dma_in;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- AES_IDATAR(0);
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- dma_cap_zero(mask_out);
- dma_cap_set(DMA_SLAVE, mask_out);
- dd->dma_lch_out.chan = dma_request_channel(mask_out,
- atmel_aes_filter, &pdata->dma_slave->txdata);
- if (!dd->dma_lch_out.chan)
- goto err_dma_out;
-
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
- dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
- AES_ODATAR(0);
- dd->dma_lch_out.dma_conf.src_maxburst = 1;
- dd->dma_lch_out.dma_conf.dst_maxburst = 1;
- dd->dma_lch_out.dma_conf.device_fc = false;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ AES_IDATAR(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ AES_ODATAR(0);
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
- return 0;
- } else {
- return -ENODEV;
- }
+ return 0;
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
return err;
}
@@ -665,13 +864,13 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_CFB);
+ AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
@@ -753,7 +952,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -773,7 +972,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -794,7 +993,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -815,7 +1014,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -836,7 +1035,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -857,7 +1056,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -876,7 +1075,7 @@ static struct crypto_alg aes_algs[] = {
.cra_driver_name = "atmel-cfb8-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB64_BLOCK_SIZE,
+ .cra_blocksize = CFB8_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0x0,
.cra_type = &crypto_ablkcipher_type,
@@ -899,7 +1098,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -915,15 +1114,14 @@ static struct crypto_alg aes_algs[] = {
},
};
-static struct crypto_alg aes_cfb64_alg[] = {
-{
+static struct crypto_alg aes_cfb64_alg = {
.cra_name = "cfb64(aes)",
.cra_driver_name = "atmel-cfb64-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -936,7 +1134,6 @@ static struct crypto_alg aes_cfb64_alg[] = {
.encrypt = atmel_aes_cfb64_encrypt,
.decrypt = atmel_aes_cfb64_decrypt,
}
-},
};
static void atmel_aes_queue_task(unsigned long data)
@@ -969,7 +1166,14 @@ static void atmel_aes_done_task(unsigned long data)
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_aes_crypt_dma_start(dd);
+ if (dd->flags & AES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
+ if (!err)
+ err = atmel_aes_crypt_dma_start(dd);
if (!err)
return; /* DMA started. Not finishing. */
}
@@ -1003,8 +1207,8 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- if (dd->hw_version >= 0x130)
- crypto_unregister_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64)
+ crypto_unregister_alg(&aes_cfb64_alg);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1017,10 +1221,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}
- atmel_aes_hw_version_init(dd);
-
- if (dd->hw_version >= 0x130) {
- err = crypto_register_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64) {
+ err = crypto_register_alg(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -1036,10 +1238,73 @@ err_aes_algs:
return err;
}
-static int __devinit atmel_aes_probe(struct platform_device *pdev)
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_cfb64 = 0;
+ dd->caps.max_burst_size = 1;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x130:
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_cfb64 = 1;
+ dd->caps.max_burst_size = 4;
+ break;
+ case 0x120:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged aes version, set minimum capabilities\n");
+ break;
+ }
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_aes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
+
+static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else
+static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct aes_platform_data *pdata;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
unsigned long aes_phys_size;
@@ -1047,6 +1312,14 @@ static int __devinit atmel_aes_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
+ pdata = atmel_aes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ err = PTR_ERR(pdata);
+ goto aes_dd_err;
+ }
+ }
+
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto aes_dd_err;
}
@@ -1099,7 +1372,7 @@ static int __devinit atmel_aes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- aes_dd->iclk = clk_get(&pdev->dev, NULL);
+ aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
@@ -1113,7 +1386,15 @@ static int __devinit atmel_aes_probe(struct platform_device *pdev)
goto aes_io_err;
}
- err = atmel_aes_dma_init(aes_dd);
+ atmel_aes_hw_version_init(aes_dd);
+
+ atmel_aes_get_cap(aes_dd);
+
+ err = atmel_aes_buff_init(aes_dd);
+ if (err)
+ goto err_aes_buff;
+
+ err = atmel_aes_dma_init(aes_dd, pdata);
if (err)
goto err_aes_dma;
@@ -1125,7 +1406,9 @@ static int __devinit atmel_aes_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel AES\n");
+ dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
+ dma_chan_name(aes_dd->dma_lch_in.chan),
+ dma_chan_name(aes_dd->dma_lch_out.chan));
return 0;
@@ -1135,6 +1418,8 @@ err_algs:
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
+ atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
iounmap(aes_dd->io_base);
aes_io_err:
clk_put(aes_dd->iclk);
@@ -1152,7 +1437,7 @@ aes_dd_err:
return err;
}
-static int __devexit atmel_aes_remove(struct platform_device *pdev)
+static int atmel_aes_remove(struct platform_device *pdev)
{
static struct atmel_aes_dev *aes_dd;
@@ -1185,10 +1470,11 @@ static int __devexit atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove = __devexit_p(atmel_aes_remove),
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_aes_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index dc53a20d7da..83b2d742566 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -14,10 +14,13 @@
#define SHA_MR_MODE_MANUAL 0x0
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
-#define SHA_MR_DUALBUFF (1 << 3)
#define SHA_MR_PROCDLY (1 << 4)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
+#define SHA_MR_ALGO_SHA384 (2 << 8)
+#define SHA_MR_ALGO_SHA512 (3 << 8)
+#define SHA_MR_ALGO_SHA224 (4 << 8)
+#define SHA_MR_DUALBUFF (1 << 16)
#define SHA_IER 0x10
#define SHA_IDR 0x14
@@ -33,6 +36,8 @@
#define SHA_ISR_URAT_MR (0x2 << 12)
#define SHA_ISR_URAT_WO (0x5 << 12)
+#define SHA_HW_VERSION 0xFC
+
#define SHA_TPR 0x108
#define SHA_TCR 0x10C
#define SHA_TNPR 0x118
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index bcdf55fdc62..0618be06b9f 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -38,6 +39,7 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
/* SHA flags */
@@ -52,11 +54,12 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
#define SHA_FLAGS_SHA1 BIT(18)
-#define SHA_FLAGS_SHA256 BIT(19)
-#define SHA_FLAGS_ERROR BIT(20)
-#define SHA_FLAGS_PAD BIT(21)
-
-#define SHA_FLAGS_DUALBUFF BIT(24)
+#define SHA_FLAGS_SHA224 BIT(19)
+#define SHA_FLAGS_SHA256 BIT(20)
+#define SHA_FLAGS_SHA384 BIT(21)
+#define SHA_FLAGS_SHA512 BIT(22)
+#define SHA_FLAGS_ERROR BIT(23)
+#define SHA_FLAGS_PAD BIT(24)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
@@ -65,6 +68,12 @@
#define ATMEL_SHA_DMA_THRESHOLD 56
+struct atmel_sha_caps {
+ bool has_dma;
+ bool has_dualbuff;
+ bool has_sha224;
+ bool has_sha_384_512;
+};
struct atmel_sha_dev;
@@ -73,8 +82,8 @@ struct atmel_sha_reqctx {
unsigned long flags;
unsigned long op;
- u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
- size_t digcnt;
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+ u64 digcnt[2];
size_t bufcnt;
size_t buflen;
dma_addr_t dma_addr;
@@ -84,6 +93,8 @@ struct atmel_sha_reqctx {
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
+ size_t block_size;
+
u8 buffer[0] __aligned(sizeof(u32));
};
@@ -97,7 +108,12 @@ struct atmel_sha_ctx {
};
-#define ATMEL_SHA_QUEUE_LENGTH 1
+#define ATMEL_SHA_QUEUE_LENGTH 50
+
+struct atmel_sha_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
struct atmel_sha_dev {
struct list_head list;
@@ -114,6 +130,12 @@ struct atmel_sha_dev {
unsigned long flags;
struct crypto_queue queue;
struct ahash_request *req;
+
+ struct atmel_sha_dma dma_lch_in;
+
+ struct atmel_sha_caps caps;
+
+ u32 hw_version;
};
struct atmel_sha_drv {
@@ -137,14 +159,6 @@ static inline void atmel_sha_write(struct atmel_sha_dev *dd,
writel_relaxed(value, dd->io_base + offset);
}
-static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
-{
- atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
-
- if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
- dd->flags |= SHA_FLAGS_DUALBUFF;
-}
-
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
size_t count;
@@ -176,31 +190,58 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
}
/*
- * The purpose of this padding is to ensure that the padded message
- * is a multiple of 512 bits. The bit "1" is appended at the end of
- * the message followed by "padlen-1" zero bits. Then a 64 bits block
- * equals to the message length in bits is appended.
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
+ * 128-bit block (SHA384/SHA512) equal to the message length in bits
+ * is appended.
*
- * padlen is calculated as followed:
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
* - if message length < 56 bytes then padlen = 56 - message length
* - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ * - if message length < 112 bytes then padlen = 112 - message length
+ * - else padlen = 128 + 112 - message length
*/
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits;
- u64 size;
-
- bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
- size = cpu_to_be64(bits);
-
- index = ctx->bufcnt & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
- ctx->bufcnt += padlen + 8;
- ctx->flags |= SHA_FLAGS_PAD;
+ u64 bits[2];
+ u64 size[2];
+
+ size[0] = ctx->digcnt[0];
+ size[1] = ctx->digcnt[1];
+
+ size[0] += ctx->bufcnt;
+ if (size[0] < ctx->bufcnt)
+ size[1]++;
+
+ size[0] += length;
+ if (size[0] < length)
+ size[1]++;
+
+ bits[1] = cpu_to_be64(size[0] << 3);
+ bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+ if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+ index = ctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+ ctx->bufcnt += padlen + 16;
+ ctx->flags |= SHA_FLAGS_PAD;
+ } else {
+ index = ctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+ ctx->bufcnt += padlen + 8;
+ ctx->flags |= SHA_FLAGS_PAD;
+ }
}
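For illustration, a small stand-alone sketch of the padlen arithmetic described in the comment above; the helper name and the sample message lengths are made up for the example.

/*
 * Illustrative sketch only: padlen for the 64-byte-block family
 * (SHA1/SHA224/SHA256) and the 128-byte-block family (SHA384/SHA512),
 * following the formulas in the comment above.
 */
#include <stdio.h>

static unsigned int padlen(unsigned long long msglen, int sha384_512)
{
	unsigned int blk   = sha384_512 ? 128 : 64;  /* block size in bytes      */
	unsigned int lenf  = sha384_512 ? 112 : 56;  /* offset of length field   */
	unsigned int index = msglen & (blk - 1);

	return (index < lenf) ? (lenf - index) : (blk + lenf - index);
}

int main(void)
{
	/* sample message lengths in bytes, chosen only for illustration */
	unsigned long long samples[] = { 0, 55, 56, 64, 111, 112, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len %3llu: padlen %3u (sha1/224/256)  %3u (sha384/512)\n",
		       samples[i], padlen(samples[i], 0), padlen(samples[i], 1));
	return 0;
}

Adding the 8-byte (SHA1/SHA224/SHA256) or 16-byte (SHA384/SHA512) length field after the pad then brings the total up to a multiple of the 64- or 128-byte block size, which is what the function above relies on when it advances bufcnt by padlen + 8 or padlen + 16.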
static int atmel_sha_init(struct ahash_request *req)
@@ -231,13 +272,35 @@ static int atmel_sha_init(struct ahash_request *req)
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
- if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA1;
- else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+ ctx->block_size = SHA1_BLOCK_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA224;
+ ctx->block_size = SHA224_BLOCK_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA256;
+ ctx->block_size = SHA256_BLOCK_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA384;
+ ctx->block_size = SHA384_BLOCK_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA512;
+ ctx->block_size = SHA512_BLOCK_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
ctx->bufcnt = 0;
- ctx->digcnt = 0;
+ ctx->digcnt[0] = 0;
+ ctx->digcnt[1] = 0;
ctx->buflen = SHA_BUFFER_LEN;
return 0;
@@ -249,19 +312,28 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
if (likely(dma)) {
- atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+ if (!dd->caps.has_dma)
+ atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
valmr = SHA_MR_MODE_PDC;
- if (dd->flags & SHA_FLAGS_DUALBUFF)
- valmr = SHA_MR_DUALBUFF;
+ if (dd->caps.has_dualbuff)
+ valmr |= SHA_MR_DUALBUFF;
} else {
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA256)
+ if (ctx->flags & SHA_FLAGS_SHA1)
+ valmr |= SHA_MR_ALGO_SHA1;
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ valmr |= SHA_MR_ALGO_SHA224;
+ else if (ctx->flags & SHA_FLAGS_SHA256)
valmr |= SHA_MR_ALGO_SHA256;
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ valmr |= SHA_MR_ALGO_SHA384;
+ else if (ctx->flags & SHA_FLAGS_SHA512)
+ valmr |= SHA_MR_ALGO_SHA512;
/* Setting CR_FIRST only for the first iteration */
- if (!ctx->digcnt)
+ if (!(ctx->digcnt[0] || ctx->digcnt[1]))
valcr = SHA_CR_FIRST;
atmel_sha_write(dd, SHA_CR, valcr);
@@ -275,13 +347,15 @@ static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
int count, len32;
const u32 *buffer = (const u32 *)buf;
- dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length, final);
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length, final);
atmel_sha_write_ctrl(dd, 0);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length;
+ ctx->digcnt[0] += length;
+ if (ctx->digcnt[0] < length)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -302,8 +376,8 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
int len32;
- dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length1, final);
+ dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
len32 = DIV_ROUND_UP(length1, sizeof(u32));
atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
@@ -317,7 +391,9 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
atmel_sha_write_ctrl(dd, 1);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length1;
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -330,6 +406,86 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
return -EINPROGRESS;
}
+static void atmel_sha_dma_callback(void *data)
+{
+ struct atmel_sha_dev *dd = data;
+
+ /* dma_lch_in - completed - wait DATRDY */
+ atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+}
+
+static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ struct dma_async_tx_descriptor *in_desc;
+ struct scatterlist sg[2];
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+ if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
+ SHA_FLAGS_SHA256)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 16;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 16;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = 32;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 32;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+
+ if (length2) {
+ sg_init_table(sg, 2);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ sg_dma_address(&sg[1]) = dma_addr2;
+ sg_dma_len(&sg[1]) = length2;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ } else {
+ sg_init_table(sg, 1);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+ if (!in_desc)
+ return -EINVAL;
+
+ in_desc->callback = atmel_sha_dma_callback;
+ in_desc->callback_param = dd;
+
+ atmel_sha_write_ctrl(dd, 1);
+
+ /* should be non-zero before next lines to disable clocks later */
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
+
+ if (final)
+ dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= SHA_FLAGS_DMA_ACTIVE;
+
+ /* Start DMA transfer */
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ if (dd->caps.has_dma)
+ return atmel_sha_xmit_dma(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+ else
+ return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+}
+
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -337,7 +493,6 @@ static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
atmel_sha_append_sg(ctx);
atmel_sha_fill_padding(ctx, 0);
-
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
@@ -349,17 +504,17 @@ static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
size_t length, int final)
{
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
- SHA1_BLOCK_SIZE);
+ ctx->block_size);
return -EINVAL;
}
ctx->flags &= ~SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
@@ -372,8 +527,8 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
- dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
- ctx->bufcnt, ctx->digcnt, final);
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
+ ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
if (final)
atmel_sha_fill_padding(ctx, 0);
@@ -400,30 +555,25 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
if (ctx->bufcnt || ctx->offset)
return atmel_sha_update_dma_slow(dd);
- dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
- ctx->digcnt, ctx->bufcnt, ctx->total);
+ dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
+ ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
sg = ctx->sg;
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
return atmel_sha_update_dma_slow(dd);
- if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
- /* size is not SHA1_BLOCK_SIZE aligned */
+ if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
+ /* size is not ctx->block_size aligned */
return atmel_sha_update_dma_slow(dd);
length = min(ctx->total, sg->length);
if (sg_is_last(sg)) {
if (!(ctx->flags & SHA_FLAGS_FINUP)) {
- /* not last sg must be SHA1_BLOCK_SIZE aligned */
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ /* not last sg must be ctx->block_size aligned */
+ tail = length & (ctx->block_size - 1);
length -= tail;
- if (length == 0) {
- /* offset where to start slow */
- ctx->offset = length;
- return atmel_sha_update_dma_slow(dd);
- }
}
}
@@ -434,7 +584,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
/* Add padding */
if (final) {
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ tail = length & (ctx->block_size - 1);
length -= tail;
ctx->total += tail;
ctx->offset = length; /* offset where to start slow */
@@ -445,10 +595,10 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
atmel_sha_fill_padding(ctx, length);
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n",
- ctx->buflen + SHA1_BLOCK_SIZE);
+ ctx->buflen + ctx->block_size);
return -EINVAL;
}
@@ -456,7 +606,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags &= ~SHA_FLAGS_SG;
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
0, final);
} else {
ctx->sg = sg;
@@ -470,7 +620,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
length, ctx->dma_addr, count, final);
}
}
@@ -483,7 +633,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags |= SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
0, final);
}
@@ -498,12 +648,13 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
if (ctx->sg)
ctx->offset = 0;
}
- if (ctx->flags & SHA_FLAGS_PAD)
+ if (ctx->flags & SHA_FLAGS_PAD) {
dma_unmap_single(dd->dev, ctx->dma_addr,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+ }
} else {
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
- SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->block_size, DMA_TO_DEVICE);
}
return 0;
@@ -515,8 +666,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+ ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
if (ctx->flags & SHA_FLAGS_CPU)
err = atmel_sha_update_cpu(dd);
@@ -524,8 +675,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
err = atmel_sha_update_dma_start(dd);
/* wait for dma completion before can take more data */
- dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
- err, ctx->digcnt);
+ dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
+ err, ctx->digcnt[1], ctx->digcnt[0]);
return err;
}
@@ -562,12 +713,21 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
u32 *hash = (u32 *)ctx->digest;
int i;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA256)
for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -577,10 +737,16 @@ static void atmel_sha_copy_ready_hash(struct ahash_request *req)
if (!req->result)
return;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA256)
memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+ else
+ memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
@@ -589,11 +755,11 @@ static int atmel_sha_finish(struct ahash_request *req)
struct atmel_sha_dev *dd = ctx->dd;
int err = 0;
- if (ctx->digcnt)
+ if (ctx->digcnt[0] || ctx->digcnt[1])
atmel_sha_copy_ready_hash(req);
- dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
- ctx->bufcnt);
+ dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
+ ctx->digcnt[0], ctx->bufcnt);
return err;
}
@@ -628,9 +794,8 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
clk_prepare_enable(dd->iclk);
- if (SHA_FLAGS_INIT & dd->flags) {
+ if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
- atmel_sha_dualbuff_test(dd);
dd->flags |= SHA_FLAGS_INIT;
dd->err = 0;
}
@@ -638,6 +803,23 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
return 0;
}
+static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
+{
+ return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+{
+ atmel_sha_hw_init(dd);
+
+ dd->hw_version = atmel_sha_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
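+
+/*
+ * Probe-time ordering (see atmel_sha_probe() below): the hardware version
+ * is read once with the clock enabled, then atmel_sha_get_cap() derives
+ * the capability flags (DMA, SHA-224, SHA-384/512) from it:
+ *
+ *	atmel_sha_hw_version_init(sha_dd);
+ *	atmel_sha_get_cap(sha_dd);
+ */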
+
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
struct ahash_request *req)
{
@@ -682,10 +864,9 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
if (ctx->op == SHA_OP_UPDATE) {
err = atmel_sha_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+ if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
/* no final() after finup() */
err = atmel_sha_final_req(dd);
- }
} else if (ctx->op == SHA_OP_FINAL) {
err = atmel_sha_final_req(dd);
}
@@ -808,7 +989,7 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
return 0;
}
@@ -826,7 +1007,7 @@ static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
tctx->fallback = NULL;
}
-static struct ahash_alg sha_algs[] = {
+static struct ahash_alg sha_1_256_algs[] = {
{
.init = atmel_sha_init,
.update = atmel_sha_update,
@@ -875,6 +1056,79 @@ static struct ahash_alg sha_algs[] = {
},
};
+static struct ahash_alg sha_224_alg = {
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "atmel-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+};
+
+static struct ahash_alg sha_384_512_algs[] = {
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "atmel-sha384",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "atmel-sha512",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+};
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
@@ -941,32 +1195,181 @@ static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
int i;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
- crypto_unregister_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
+ crypto_unregister_ahash(&sha_1_256_algs[i]);
+
+ if (dd->caps.has_sha224)
+ crypto_unregister_ahash(&sha_224_alg);
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
+ crypto_unregister_ahash(&sha_384_512_algs[i]);
+ }
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
int err, i, j;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
- err = crypto_register_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
- goto err_sha_algs;
+ goto err_sha_1_256_algs;
+ }
+
+ if (dd->caps.has_sha224) {
+ err = crypto_register_ahash(&sha_224_alg);
+ if (err)
+ goto err_sha_224_algs;
+ }
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ err = crypto_register_ahash(&sha_384_512_algs[i]);
+ if (err)
+ goto err_sha_384_512_algs;
+ }
}
return 0;
-err_sha_algs:
+err_sha_384_512_algs:
for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_algs[j]);
+ crypto_unregister_ahash(&sha_384_512_algs[j]);
+ crypto_unregister_ahash(&sha_224_alg);
+err_sha_224_algs:
+ i = ARRAY_SIZE(sha_1_256_algs);
+err_sha_1_256_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_1_256_algs[j]);
return err;
}
-static int __devinit atmel_sha_probe(struct platform_device *pdev)
+static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask_in;
+
+ /* Try to grab DMA channel */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
+ atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan) {
+ dev_warn(dd->dev, "no DMA channel available\n");
+ return err;
+ }
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ SHA_REG_DIN(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ return 0;
+}
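+
+/*
+ * The SHA block only consumes data, so a single MEM_TO_DEV channel is
+ * requested; it targets the SHA_REG_DIN(0) input FIFO with a 4-byte bus
+ * width. If no channel can be obtained the function warns and returns
+ * -ENOMEM, which aborts the probe.
+ */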
+
+static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+}
+
+static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_sha224 = 0;
+ dd->caps.has_sha_384_512 = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x410:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ break;
+ case 0x400:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ break;
+ case 0x320:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged sha version, set minimum capabilities\n");
+ break;
+ }
+}
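+
+/*
+ * Capability map derived from the IP version: 0x410 adds SHA-384/512 on
+ * top of 0x400 (DMA, dual buffer, SHA-224); 0x320 keeps the base
+ * SHA-1/SHA-256 feature set. Unknown versions fall back to the minimum
+ * capabilities with a warning.
+ */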
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_sha_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-sha" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
+
+static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
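+
+/*
+ * Note on the DT path: atmel_sha_of_init() only allocates an empty
+ * crypto_platform_data/dma_slave pair; the channel itself is then found
+ * by dma_request_slave_channel_compat(), which tries the "tx" DT binding
+ * first and falls back to the atmel_sha_filter()/at_dma_slave match for
+ * legacy platform data.
+ */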
+
+static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
unsigned long sha_phys_size;
@@ -1018,7 +1421,7 @@ static int __devinit atmel_sha_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- sha_dd->iclk = clk_get(&pdev->dev, NULL);
+ sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
@@ -1032,6 +1435,32 @@ static int __devinit atmel_sha_probe(struct platform_device *pdev)
goto sha_io_err;
}
+ atmel_sha_hw_version_init(sha_dd);
+
+ atmel_sha_get_cap(sha_dd);
+
+ if (sha_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = atmel_sha_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_sha_dma_init(sha_dd, pdata);
+ if (err)
+ goto err_sha_dma;
+
+ dev_info(dev, "using %s for DMA transfers\n",
+ dma_chan_name(sha_dd->dma_lch_in.chan));
+ }
+
spin_lock(&atmel_sha.lock);
list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
spin_unlock(&atmel_sha.lock);
@@ -1040,7 +1469,9 @@ static int __devinit atmel_sha_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel SHA1/SHA256\n");
+ dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
+ sha_dd->caps.has_sha224 ? "/SHA224" : "",
+ sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
return 0;
@@ -1048,6 +1479,10 @@ err_algs:
spin_lock(&atmel_sha.lock);
list_del(&sha_dd->list);
spin_unlock(&atmel_sha.lock);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+err_sha_dma:
+err_pdata:
iounmap(sha_dd->io_base);
sha_io_err:
clk_put(sha_dd->iclk);
@@ -1063,7 +1498,7 @@ sha_dd_err:
return err;
}
-static int __devexit atmel_sha_remove(struct platform_device *pdev)
+static int atmel_sha_remove(struct platform_device *pdev)
{
static struct atmel_sha_dev *sha_dd;
@@ -1078,6 +1513,9 @@ static int __devexit atmel_sha_remove(struct platform_device *pdev)
tasklet_kill(&sha_dd->done_task);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+
iounmap(sha_dd->io_base);
clk_put(sha_dd->iclk);
@@ -1093,15 +1531,16 @@ static int __devexit atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove = __devexit_p(atmel_sha_remove),
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_sha_dt_ids),
},
};
module_platform_driver(atmel_sha_driver);
-MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
index 5ac2a900d80..f86734d0fda 100644
--- a/drivers/crypto/atmel-tdes-regs.h
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -69,6 +69,8 @@
#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0)
#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0
+#define TDES_HW_VERSION 0xFC
+
#define TDES_RPR 0x100
#define TDES_RCR 0x104
#define TDES_TPR 0x108
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 7495f98c722..6cde5b530c6 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -38,29 +39,35 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x007f
+#define TDES_FLAGS_MODE_MASK 0x00ff
#define TDES_FLAGS_ENCRYPT BIT(0)
#define TDES_FLAGS_CBC BIT(1)
#define TDES_FLAGS_CFB BIT(2)
#define TDES_FLAGS_CFB8 BIT(3)
#define TDES_FLAGS_CFB16 BIT(4)
#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_OFB BIT(6)
+#define TDES_FLAGS_CFB64 BIT(6)
+#define TDES_FLAGS_OFB BIT(7)
#define TDES_FLAGS_INIT BIT(16)
#define TDES_FLAGS_FAST BIT(17)
#define TDES_FLAGS_BUSY BIT(18)
+#define TDES_FLAGS_DMA BIT(19)
-#define ATMEL_TDES_QUEUE_LENGTH 1
+#define ATMEL_TDES_QUEUE_LENGTH 50
#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
-#define CFB64_BLOCK_SIZE 8
+struct atmel_tdes_caps {
+ bool has_dma;
+ u32 has_cfb_3keys;
+};
struct atmel_tdes_dev;
@@ -70,12 +77,19 @@ struct atmel_tdes_ctx {
int keylen;
u32 key[3*DES_KEY_SIZE / sizeof(u32)];
unsigned long flags;
+
+ u16 block_size;
};
struct atmel_tdes_reqctx {
unsigned long mode;
};
+struct atmel_tdes_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
+
struct atmel_tdes_dev {
struct list_head list;
unsigned long phys_base;
@@ -99,8 +113,10 @@ struct atmel_tdes_dev {
size_t total;
struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
size_t in_offset;
struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
size_t out_offset;
size_t buflen;
@@ -109,10 +125,16 @@ struct atmel_tdes_dev {
void *buf_in;
int dma_in;
dma_addr_t dma_addr_in;
+ struct atmel_tdes_dma dma_lch_in;
void *buf_out;
int dma_out;
dma_addr_t dma_addr_out;
+ struct atmel_tdes_dma dma_lch_out;
+
+ struct atmel_tdes_caps caps;
+
+ u32 hw_version;
};
struct atmel_tdes_drv {
@@ -207,6 +229,31 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
return 0;
}
+static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
+{
+ return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+{
+ atmel_tdes_hw_init(dd);
+
+ dd->hw_version = atmel_tdes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_tdes_dma_callback(void *data)
+{
+ struct atmel_tdes_dev *dd = data;
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
+}
+
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
@@ -217,7 +264,9 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
if (err)
return err;
- atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+ if (!dd->caps.has_dma)
+ atmel_tdes_write(dd, TDES_PTCR,
+ TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
/* MR register must be set before IV registers */
if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
@@ -241,6 +290,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_CFBS_16b;
else if (dd->flags & TDES_FLAGS_CFB32)
valmr |= TDES_MR_CFBS_32b;
+ else if (dd->flags & TDES_FLAGS_CFB64)
+ valmr |= TDES_MR_CFBS_64b;
} else if (dd->flags & TDES_FLAGS_OFB) {
valmr |= TDES_MR_OPMOD_OFB;
}
@@ -262,7 +313,7 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
return 0;
}
-static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
int err = 0;
size_t count;
@@ -288,7 +339,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
int err = -ENOMEM;
@@ -333,7 +384,7 @@ err_alloc:
return err;
}
-static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
@@ -343,7 +394,7 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -379,7 +430,76 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
+{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd = ctx->dd;
+ struct scatterlist sg[2];
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+
+ dd->dma_size = length;
+
+ if (!(dd->flags & TDES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
+
+ if (dd->flags & TDES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & TDES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+ dd->flags |= TDES_FLAGS_DMA;
+
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
+
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc)
+ return -EINVAL;
+
+ out_desc->callback = atmel_tdes_dma_callback;
+ out_desc->callback_param = dd;
+
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(dd->dma_lch_out.chan);
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return 0;
+}
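+
+/*
+ * The TDES dmaengine path uses two channels: dma_lch_in feeds TDES_IDATA1R
+ * and dma_lch_out drains TDES_ODATA1R. Bus widths are narrowed to 1 or 2
+ * bytes for CFB8/CFB16 so each beat matches the stream block size, and
+ * completion is signalled by the out-channel callback, which schedules
+ * done_task.
+ */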
+
+static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
crypto_ablkcipher_reqtfm(dd->req));
@@ -387,23 +507,23 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
size_t count;
dma_addr_t addr_in, addr_out;
- if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+ if ((!dd->in_offset) && (!dd->out_offset)) {
/* check for alignment */
- in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
- out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
}
+
if (fast) {
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
- if (count != dd->total) {
- pr_err("request length != buffer length\n");
- return -EINVAL;
- }
-
err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
if (!err) {
dev_err(dd->dev, "dma_map_sg() error\n");
@@ -433,13 +553,16 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
addr_out = dd->dma_addr_out;
dd->flags &= ~TDES_FLAGS_FAST;
-
}
dd->total -= count;
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
- if (err) {
+ if (dd->caps.has_dma)
+ err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ else
+ err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+
+ if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
}
@@ -447,7 +570,6 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
return err;
}
-
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct ablkcipher_request *req = dd->req;
@@ -506,7 +628,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
err = atmel_tdes_write_ctrl(dd);
if (!err)
- err = atmel_tdes_crypt_dma_start(dd);
+ err = atmel_tdes_crypt_start(dd);
if (err) {
/* des_task will not finish it, so do it here */
atmel_tdes_finish_req(dd, err);
@@ -516,41 +638,136 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
return ret;
}
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+ int err = -EINVAL;
+ size_t count;
+
+ if (dd->flags & TDES_FLAGS_DMA) {
+ err = 0;
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
+ }
+ return err;
+}
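+
+/*
+ * dmaengine completion handling: on the fast path the source and
+ * destination scatterlists are simply unmapped; on the slow path the
+ * bounce buffer is synced and copied back to the output scatterlist with
+ * atmel_tdes_sg_copy(). TDES_FLAGS_DMA ensures this only runs for
+ * transfers started through the dmaengine back-end.
+ */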
static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct atmel_tdes_dev *dd;
if (mode & TDES_FLAGS_CFB8) {
if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
- } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
- return -EINVAL;
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of DES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = DES_BLOCK_SIZE;
}
- dd = atmel_tdes_find_dev(ctx);
- if (!dd)
- return -ENODEV;
-
rctx->mode = mode;
- return atmel_tdes_handle_queue(dd, req);
+ return atmel_tdes_handle_queue(ctx->dd, req);
+}
+
+static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ TDES_IDATA1R;
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ TDES_ODATA1R;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
+
+ return 0;
+
+err_dma_out:
+ dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
+ return err;
+}
+
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+ dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -590,7 +807,8 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* HW bug in cfb 3-keys mode.
*/
- if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
+ if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
+ && (keylen != 2*DES_KEY_SIZE)) {
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
@@ -678,8 +896,15 @@ static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd;
+
tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ dd = atmel_tdes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
return 0;
}
@@ -695,7 +920,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -715,7 +940,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -736,7 +961,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -778,7 +1003,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -799,7 +1024,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -820,7 +1045,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -841,7 +1066,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -861,7 +1086,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -882,7 +1107,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -924,7 +1149,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -945,7 +1170,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -966,7 +1191,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -994,14 +1219,24 @@ static void atmel_tdes_done_task(unsigned long data)
struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
int err;
- err = atmel_tdes_crypt_dma_stop(dd);
+ if (!(dd->flags & TDES_FLAGS_DMA))
+ err = atmel_tdes_crypt_pdc_stop(dd);
+ else
+ err = atmel_tdes_crypt_dma_stop(dd);
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_tdes_crypt_dma_start(dd);
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
+ if (!err)
+ err = atmel_tdes_crypt_start(dd);
if (!err)
- return;
+ return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@@ -1053,9 +1288,72 @@ err_tdes_algs:
return err;
}
-static int __devinit atmel_tdes_probe(struct platform_device *pdev)
+static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_cfb_3keys = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xf00) {
+ case 0x700:
+ dd->caps.has_dma = 1;
+ dd->caps.has_cfb_3keys = 1;
+ break;
+ case 0x600:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged tdes version, set minimum capabilities\n");
+ break;
+ }
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_tdes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-tdes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
+
+static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
unsigned long tdes_phys_size;
@@ -1109,7 +1407,7 @@ static int __devinit atmel_tdes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- tdes_dd->iclk = clk_get(&pdev->dev, NULL);
+ tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
@@ -1123,9 +1421,36 @@ static int __devinit atmel_tdes_probe(struct platform_device *pdev)
goto tdes_io_err;
}
- err = atmel_tdes_dma_init(tdes_dd);
+ atmel_tdes_hw_version_init(tdes_dd);
+
+ atmel_tdes_get_cap(tdes_dd);
+
+ err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_tdes_buff;
+
+ if (tdes_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = atmel_tdes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_tdes_dma_init(tdes_dd, pdata);
+ if (err)
+ goto err_tdes_dma;
+
+ dev_info(dev, "using %s, %s for DMA transfers\n",
+ dma_chan_name(tdes_dd->dma_lch_in.chan),
+ dma_chan_name(tdes_dd->dma_lch_out.chan));
+ }
spin_lock(&atmel_tdes.lock);
list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
@@ -1143,8 +1468,12 @@ err_algs:
spin_lock(&atmel_tdes.lock);
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
err_tdes_dma:
+err_pdata:
+ atmel_tdes_buff_cleanup(tdes_dd);
+err_tdes_buff:
iounmap(tdes_dd->io_base);
tdes_io_err:
clk_put(tdes_dd->iclk);
@@ -1162,7 +1491,7 @@ tdes_dd_err:
return err;
}
-static int __devexit atmel_tdes_remove(struct platform_device *pdev)
+static int atmel_tdes_remove(struct platform_device *pdev)
{
static struct atmel_tdes_dev *tdes_dd;
@@ -1178,7 +1507,10 @@ static int __devexit atmel_tdes_remove(struct platform_device *pdev)
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
+
+ atmel_tdes_buff_cleanup(tdes_dd);
iounmap(tdes_dd->io_base);
@@ -1195,10 +1527,11 @@ static int __devexit atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove = __devexit_p(atmel_tdes_remove),
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
},
};
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 5398580b431..b099e33cb07 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -29,10 +29,11 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <asm/blackfin.h>
-#include <asm/bfin_crc.h>
#include <asm/dma.h>
#include <asm/portmux.h>
+#include <asm/io.h>
+
+#include "bfin_crc.h"
#define CRC_CCRYPTO_QUEUE_LENGTH 5
@@ -54,12 +55,13 @@ struct bfin_crypto_crc {
int irq;
int dma_ch;
u32 poly;
- volatile struct crc_register *regs;
+ struct crc_register *regs;
struct ahash_request *req; /* current request in operation */
struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
u8 *sg_mid_buf;
+ dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
struct tasklet_struct done_task;
struct crypto_queue queue; /* waiting requests */
@@ -132,14 +134,13 @@ static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nent
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
- crc->regs->datacntrld = 0;
- crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
- crc->regs->curresult = key;
+ writel(0, &crc->regs->datacntrld);
+ writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
+ writel(key, &crc->regs->curresult);
/* setup CRC interrupts */
- crc->regs->status = CMPERRI | DCNTEXPI;
- crc->regs->intrenset = CMPERRI | DCNTEXPI;
- SSYNC();
+ writel(CMPERRI | DCNTEXPI, &crc->regs->status);
+ writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
return 0;
}
@@ -151,7 +152,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
struct bfin_crypto_crc *crc;
- dev_dbg(crc->dev, "crc_init\n");
+ dev_dbg(ctx->crc->dev, "crc_init\n");
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
crc_ctx->crc = crc;
@@ -160,7 +161,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
spin_unlock_bh(&crc_list.lock);
if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
- dev_dbg(crc->dev, "init: requested sg list is too big > %d\n",
+ dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
CRC_MAX_DMA_DESC);
return -EINVAL;
}
@@ -175,7 +176,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
/* init crc results */
put_unaligned_le32(crc_ctx->key, req->result);
- dev_dbg(crc->dev, "init: digest size: %d\n",
+ dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
@@ -195,7 +196,6 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
dma_addr = sg_dma_address(sg);
/* deduce extra bytes in last sg */
if (sg_is_last(sg))
@@ -208,12 +208,29 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
bytes in current sg buffer. Move addr of current
sg and deduce the length of current sg.
*/
- memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count,
- (void *)dma_addr,
+ memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
+ sg_virt(sg),
CHKSUM_DIGEST_SIZE - mid_dma_count);
dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
+
+ dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
+ DMAEN | PSIZE_32 | WDSIZE_32;
+
+ /* setup new dma descriptor for next middle dma */
+ crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
+ crc->sg_cpu[i].cfg = dma_config;
+ crc->sg_cpu[i].x_count = 1;
+ crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
+ dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+ i, crc->sg_cpu[i].start_addr,
+ crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+ crc->sg_cpu[i].x_modify);
+ i++;
}
+
+ dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
/* chop current sg dma len to multiple of 32 bits */
mid_dma_count = dma_count % 4;
dma_count &= ~0x3;
@@ -244,24 +261,9 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
if (mid_dma_count) {
/* copy extra bytes to next middle dma buffer */
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
- DMAEN | PSIZE_32 | WDSIZE_32;
memcpy(crc->sg_mid_buf + (i << 2),
- (void *)(dma_addr + (dma_count << 2)),
+ (u8*)sg_virt(sg) + (dma_count << 2),
mid_dma_count);
- /* setup new dma descriptor for next middle dma */
- crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
- crc->sg_mid_buf + (i << 2),
- CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = 1;
- crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
}
}
@@ -285,17 +287,12 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
if (i == 0)
return;
- flush_dcache_range((unsigned int)crc->sg_cpu,
- (unsigned int)crc->sg_cpu +
- i * sizeof(struct dma_desc_array));
-
/* Set the last descriptor to stop mode */
crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
crc->sg_cpu[i - 1].cfg |= DI_EN;
set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
set_dma_x_count(crc->dma_ch, 0);
set_dma_x_modify(crc->dma_ch, 0);
- SSYNC();
set_dma_config(crc->dma_ch, dma_config);
}
@@ -309,6 +306,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
int nsg, i, j;
unsigned int nextlen;
unsigned long flags;
+ u32 reg;
spin_lock_irqsave(&crc->lock, flags);
if (req)
@@ -408,14 +406,14 @@ finish_update:
ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
/* set CRC data count before start DMA */
- crc->regs->datacnt = ctx->sg_buflen >> 2;
+ writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
/* setup and enable CRC DMA */
bfin_crypto_crc_config_dma(crc);
/* finally kick off CRC operation */
- crc->regs->control |= BLKEN;
- SSYNC();
+ reg = readl(&crc->regs->control);
+ writel(reg | BLKEN, &crc->regs->control);
return -EINPROGRESS;
}
@@ -536,15 +534,17 @@ static void bfin_crypto_crc_done_task(unsigned long data)
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
struct bfin_crypto_crc *crc = dev_id;
+ u32 reg;
- if (crc->regs->status & DCNTEXP) {
- crc->regs->status = DCNTEXP;
- SSYNC();
+ if (readl(&crc->regs->status) & DCNTEXP) {
+ writel(DCNTEXP, &crc->regs->status);
/* prepare results */
- put_unaligned_le32(crc->regs->result, crc->req->result);
+ put_unaligned_le32(readl(&crc->regs->result),
+ crc->req->result);
- crc->regs->control &= ~BLKEN;
+ reg = readl(&crc->regs->control);
+ writel(reg & ~BLKEN, &crc->regs->control);
crc->busy = 0;
if (crc->req->base.complete)
@@ -568,7 +568,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
int i = 100000;
- while ((crc->regs->control & BLKEN) && --i)
+ while ((readl(&crc->regs->control) & BLKEN) && --i)
cpu_relax();
if (i == 0)
@@ -586,7 +586,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
* bfin_crypto_crc_probe - Initialize module
*
*/
-static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
+static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -594,7 +594,7 @@ static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
unsigned int timeout = 100000;
int ret;
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
if (!crc) {
dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
return -ENOMEM;
@@ -610,42 +610,39 @@ static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- ret = -ENOENT;
- goto out_error_free_mem;
+ return -ENOENT;
}
- crc->regs = ioremap(res->start, resource_size(res));
- if (!crc->regs) {
+ crc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR((void *)crc->regs)) {
dev_err(&pdev->dev, "Cannot map CRC IO\n");
- ret = -ENXIO;
- goto out_error_free_mem;
+ return PTR_ERR((void *)crc->regs);
}
crc->irq = platform_get_irq(pdev, 0);
if (crc->irq < 0) {
dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
- ret = -ENOENT;
- goto out_error_unmap;
+ return -ENOENT;
}
- ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
+ ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
+ IRQF_SHARED, dev_name(dev), crc);
if (ret) {
dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
- goto out_error_unmap;
+ return ret;
}
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No CRC DMA channel specified\n");
- ret = -ENOENT;
- goto out_error_irq;
+ return -ENOENT;
}
crc->dma_ch = res->start;
ret = request_dma(crc->dma_ch, dev_name(dev));
if (ret) {
dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
- goto out_error_irq;
+ return ret;
}
crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
@@ -658,31 +655,32 @@ static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
* 1 last + 1 next dma descriptors
*/
crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
+ crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
+ * ((CRC_MAX_DMA_DESC + 1) << 1);
- crc->regs->control = 0;
- SSYNC();
- crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
- SSYNC();
+ writel(0, &crc->regs->control);
+ crc->poly = (u32)pdev->dev.platform_data;
+ writel(crc->poly, &crc->regs->poly);
- while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
+ while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
cpu_relax();
if (timeout == 0)
dev_info(&pdev->dev, "init crc poly timeout\n");
+ platform_set_drvdata(pdev, crc);
+
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
- platform_set_drvdata(pdev, crc);
-
- ret = crypto_register_ahash(&algs);
- if (ret) {
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
- dev_err(&pdev->dev, "Cann't register crypto ahash device\n");
- goto out_error_dma;
+ if (list_is_singular(&crc_list.dev_list)) {
+ ret = crypto_register_ahash(&algs);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register crypto ahash device\n");
+ goto out_error_dma;
+ }
}
dev_info(&pdev->dev, "initialized\n");
@@ -693,12 +691,6 @@ out_error_dma:
if (crc->sg_cpu)
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
free_dma(crc->dma_ch);
-out_error_irq:
- free_irq(crc->irq, crc->dev);
-out_error_unmap:
- iounmap((void *)crc->regs);
-out_error_free_mem:
- kfree(crc);
return ret;
}
@@ -707,7 +699,7 @@ out_error_free_mem:
* bfin_crypto_crc_remove - Initialize module
*
*/
-static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
+static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
@@ -720,18 +712,14 @@ static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
crypto_unregister_ahash(&algs);
tasklet_kill(&crc->done_task);
- iounmap((void *)crc->regs);
free_dma(crc->dma_ch);
- if (crc->irq > 0)
- free_irq(crc->irq, crc->dev);
- kfree(crc);
return 0;
}
static struct platform_driver bfin_crypto_crc_driver = {
.probe = bfin_crypto_crc_probe,
- .remove = __devexit_p(bfin_crypto_crc_remove),
+ .remove = bfin_crypto_crc_remove,
.suspend = bfin_crypto_crc_suspend,
.resume = bfin_crypto_crc_resume,
.driver = {
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
new file mode 100644
index 00000000000..75cef4dc85a
--- /dev/null
+++ b/drivers/crypto/bfin_crc.h
@@ -0,0 +1,125 @@
+/*
+ * bfin_crc.h - interface to Blackfin CRC controllers
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_CRC_H__
+#define __BFIN_CRC_H__
+
+/* Function driver which use hardware crc must initialize the structure */
+struct crc_info {
+ /* Input data address */
+ unsigned char *in_addr;
+ /* Output data address */
+ unsigned char *out_addr;
+ /* Input or output bytes */
+ unsigned long datasize;
+ union {
+ /* CRC to compare with that of input buffer */
+ unsigned long crc_compare;
+ /* Value to compare with input data */
+ unsigned long val_verify;
+ /* Value to fill */
+ unsigned long val_fill;
+ };
+ /* Value to program the 32b CRC Polynomial */
+ unsigned long crc_poly;
+ union {
+ /* CRC calculated from the input data */
+ unsigned long crc_result;
+ /* First failed position to verify input data */
+ unsigned long pos_verify;
+ };
+ /* CRC mirror flags */
+ unsigned int bitmirr:1;
+ unsigned int bytmirr:1;
+ unsigned int w16swp:1;
+ unsigned int fdsel:1;
+ unsigned int rsltmirr:1;
+ unsigned int polymirr:1;
+ unsigned int cmpmirr:1;
+};
+
+/* Userspace interface */
+#define CRC_IOC_MAGIC 'C'
+#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
+#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
+#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
+#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
+
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+
+struct crc_register {
+ u32 control;
+ u32 datacnt;
+ u32 datacntrld;
+ u32 __pad_1[2];
+ u32 compare;
+ u32 fillval;
+ u32 datafifo;
+ u32 intren;
+ u32 intrenset;
+ u32 intrenclr;
+ u32 poly;
+ u32 __pad_2[4];
+ u32 status;
+ u32 datacntcap;
+ u32 __pad_3;
+ u32 result;
+ u32 curresult;
+ u32 __pad_4[3];
+ u32 revid;
+};
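+
+/*
+ * The register block is ioremapped and accessed with readl()/writel()
+ * (see bfin_crc.c), replacing the old volatile pointer dereferences and
+ * explicit SSYNC() barriers.
+ */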
+
+/* CRC_STATUS Masks */
+#define CMPERR 0x00000002 /* Compare error */
+#define DCNTEXP 0x00000010 /* datacnt register expired */
+#define IBR 0x00010000 /* Input buffer ready */
+#define OBR 0x00020000 /* Output buffer ready */
+#define IRR 0x00040000 /* Immediate result ready */
+#define LUTDONE 0x00080000 /* Look-up table generation done */
+#define FSTAT 0x00700000 /* FIFO status */
+#define MAX_FIFO 4 /* Max fifo size */
+
+/* CRC_CONTROL Masks */
+#define BLKEN 0x00000001 /* Block enable */
+#define OPMODE 0x000000F0 /* Operation mode */
+#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
+#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
+#define MODE_DATA_FILL 2 /* MTM data fill */
+#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
+#define MODE_DATA_VERIFY 4 /* MSM data verify */
+#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
+#define AUTOCLRF 0x00000200 /* Auto clear to one */
+#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
+#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
+#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
+#define BITMIRR_OFFSET 16 /* Mirror bits offset */
+#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
+#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
+#define W16SWP 0x00040000 /* Mirror upper and lower 16-bit word of 32-bit input data */
+#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
+#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
+#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
+#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
+#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
+#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
+#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
+#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
+#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
+
+/* CRC_INTREN Masks */
+#define CMPERRI 0x02 /* CRC_ERROR_INTR */
+#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
+
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 65c7668614a..e7555ff4caf 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
- This module adds a job ring operation interface, and configures h/w
+ This module creates job ring devices, and configures h/w
to operate as a DPAA component automatically, depending
on h/w feature availability.
To compile this driver as a module, choose M here: the module
will be called caam.
+config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+ Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM). This module adds a job ring operation
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default n
help
Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
@@ -76,9 +89,9 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
- select CRYPTO_AHASH
+ select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -98,3 +111,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default n
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index b1eb44838db..550758a333e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -1,10 +1,15 @@
#
# Makefile for the CAAM backend and dependent components
#
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+ EXTRA_CFLAGS := -DDEBUG
+endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-caam-objs := ctrl.o jr.o error.o key_gen.o
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b2a0a0726a5..c09ce1f040d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -65,13 +65,15 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
@@ -84,12 +86,11 @@
#ifdef DEBUG
/* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
+static struct list_head alg_list;
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
@@ -107,27 +108,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
/*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
- u32 *jump_cmd;
-
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
* For aead functions, read payload and write payload,
* both of which are specified in req->src and req->dst
*/
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
@@ -214,9 +202,197 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
append_key_aead(desc, ctx, keys_fit_inline);
set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /*
+ * NULL encryption; IV is zero
+ * assoclen = (assoclen + cryptlen) - cryptlen
+ */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -225,13 +401,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd;
u32 geniv, moveiv;
u32 *desc;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->authsize)
return 0;
+ /* NULL encryption / decryption */
+ if (!ctx->enckeylen)
+ return aead_null_set_sh_desc(aead);
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
@@ -256,7 +435,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* assoclen + cryptlen = seqinlen - ivsize */
append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
- /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
/* read assoc before reading payload */
@@ -285,7 +464,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -294,35 +473,24 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ keys_fit_inline = false;
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- desc = ctx->sh_desc_dec;
-
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline);
+ desc = ctx->sh_desc_dec;
- /* Only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize */
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize)
+ ctx->authsize + tfm->ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -343,7 +511,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load ICV */
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
- append_dec_shr_done(desc);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
@@ -353,7 +520,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -362,6 +529,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ keys_fit_inline = false;
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
@@ -436,7 +604,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -470,24 +638,10 @@ static int aead_setkey(struct crypto_aead *aead,
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
+ struct crypto_authenc_keys keys;
int ret = 0;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (keylen > CAAM_MAX_KEY_SIZE)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
/* Pick class 2 key length from algorithm submask */
@@ -495,41 +649,45 @@ static int aead_setkey(struct crypto_aead *aead,
OP_ALG_ALGSEL_SHIFT] * 2;
ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+ if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
- keylen, enckeylen, authkeylen);
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, key, authkeylen);
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+ memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + enckeylen, 1);
+ ctx->split_key_pad_len + keys.enckeylen, 1);
#endif
- ctx->enckeylen = enckeylen;
+ ctx->enckeylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
}
return ret;
@@ -545,11 +703,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
struct device *jrdev = ctx->jrdev;
int ret = 0;
- u32 *key_jump_cmd, *jump_cmd;
+ u32 *key_jump_cmd;
u32 *desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
@@ -576,9 +734,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | tfm->ivsize);
@@ -598,7 +753,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -615,11 +771,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
- /* For aead, only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
/* load IV */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -631,9 +783,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Perform operation */
ablkcipher_append_src_dst(desc);
- /* Wait for key to load before allowing propagating error */
- append_dec_shr_done(desc);
-
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -643,7 +792,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -771,22 +921,19 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct aead_edesc *)((char *)desc -
offsetof(struct aead_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
req->assoclen , 1);
- print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1);
@@ -814,19 +961,16 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
offsetof(struct aead_edesc, hw_desc));
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen, 1);
+ req->cryptlen - ctx->authsize, 1);
#endif
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
@@ -837,7 +981,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
err = -EBADMSG;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
sizeof(struct iphdr) + req->assoclen +
@@ -845,7 +989,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ctx->authsize + 36, 1);
if (!err && edesc->sec4_sg_bytes) {
struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
- print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1);
}
@@ -871,17 +1015,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ablkcipher_edesc *)((char *)desc -
offsetof(struct ablkcipher_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
@@ -906,17 +1047,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ablkcipher_edesc *)((char *)desc -
offsetof(struct ablkcipher_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
@@ -947,16 +1085,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
@@ -973,12 +1111,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
(edesc->src_nents ? : 1);
in_options = LDST_SGF;
}
- if (encrypt)
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
- else
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen, in_options);
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (likely(req->src == req->dst)) {
if (all_contig) {
@@ -999,7 +1134,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
}
}
if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+ out_options);
else
append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
out_options);
@@ -1025,15 +1161,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
@@ -1049,8 +1185,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (contig & GIV_DST_CONTIG) {
dst_dma = edesc->iv_dma;
@@ -1067,7 +1203,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
}
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+ out_options);
}
/*
@@ -1086,10 +1223,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : req->nbytes, 1);
#endif
@@ -1131,7 +1268,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- int desc_bytes, bool *all_contig_ptr)
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1146,15 +1284,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool assoc_chained = false, src_chained = false, dst_chained = false;
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1235,11 +1383,9 @@ static int aead_encrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1247,7 +1393,7 @@ static int aead_encrypt(struct aead_request *req)
init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
all_contig, true);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
@@ -1276,12 +1422,12 @@ static int aead_decrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
req->cryptlen, 1);
#endif
@@ -1290,7 +1436,7 @@ static int aead_decrypt(struct aead_request *req)
init_aead_job(ctx->sh_desc_dec,
ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
@@ -1333,10 +1479,11 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1427,8 +1574,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
CAAM_CMD_SZ, &contig);
@@ -1437,7 +1582,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return PTR_ERR(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
req->cryptlen, 1);
#endif
@@ -1446,7 +1591,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
init_aead_giv_job(ctx->sh_desc_givenc,
ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
@@ -1463,6 +1608,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return ret;
}
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+ return aead_encrypt(&areq->areq);
+}
+
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -1546,7 +1696,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
sec4_sg_bytes, 1);
#endif
@@ -1575,7 +1725,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
init_ablkcipher_job(ctx->sh_desc_enc,
ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
@@ -1613,7 +1763,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
ctx->sh_desc_dec_dma, edesc, req, iv_contig);
desc = edesc->hw_desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
@@ -1650,11 +1800,125 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
- /*
- * single-pass ipsec_esp descriptor
- * authencesn(*,*) is also registered, although not present
- * explicitly here.
- */
+ /* single-pass ipsec_esp descriptor */
+ {
+ .name = "authenc(hmac(md5),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -1697,6 +1961,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(aes))",
.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1736,6 +2001,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(aes))",
.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1814,6 +2080,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1853,6 +2120,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1930,6 +2198,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des))",
.driver_name = "authenc-hmac-sha224-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1969,6 +2238,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des))",
.driver_name = "authenc-hmac-sha384-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -2057,7 +2327,6 @@ static struct caam_alg_template driver_algs[] = {
struct caam_crypto_alg {
struct list_head entry;
- struct device *ctrldev;
int class1_alg_type;
int class2_alg_type;
int alg_op;
@@ -2070,14 +2339,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
- /*
- * distribute tfms across job rings to ensure in-order
- * crypto request processing per tfm
- */
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2371,31 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+ if (ctx->key_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+ dma_unmap_single(ctx->jrdev, ctx->key_dma,
+ ctx->enckeylen + ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!alg_list.next)
return;
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->alg_list.next)
- return;
-
- list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
- struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
*template)
{
struct caam_crypto_alg *t_alg;
@@ -2149,7 +2403,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -2181,81 +2435,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
- bool done = false;
-authencesn:
- t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ t_alg = caam_alg_alloc(&driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
continue;
}
err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
- } else {
- list_add_tail(&t_alg->entry, &priv->alg_list);
- if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
- !memcmp(driver_algs[i].name, "authenc", 7) &&
- !done) {
- char *name;
-
- name = driver_algs[i].name;
- memmove(name + 10, name + 7, strlen(name) - 7);
- memcpy(name + 7, "esn", 3);
-
- name = driver_algs[i].driver_name;
- memmove(name + 10, name + 7, strlen(name) - 7);
- memcpy(name + 7, "esn", 3);
-
- done = true;
- goto authencesn;
- }
- }
+ } else
+ list_add_tail(&t_alg->entry, &alg_list);
}
- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");
return err;
}
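
As a usage note, a minimal sketch of the per-transform job-ring ownership pattern this patch switches to: caam_jr_alloc()/caam_jr_free() are the interfaces used by caam_cra_init()/caam_cra_exit() above (assumed to be declared in jr.h, as the diffstat suggests), while my_ctx, my_ctx_init() and my_ctx_exit() are hypothetical names for illustration.

#include <linux/device.h>
#include <linux/err.h>

#include "jr.h"		/* caam_jr_alloc(), caam_jr_free() */

/* Hypothetical per-tfm context holding the owned job ring device. */
struct my_ctx {
	struct device *jrdev;
};

static int my_ctx_init(struct my_ctx *ctx)
{
	/* Take one job ring per transform to keep its requests in order. */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev))
		return PTR_ERR(ctx->jrdev);

	return 0;
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	/* Release the ring reference when the transform goes away. */
	caam_jr_free(ctx->jrdev);
}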
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 32aba7a6150..0d9284ef96a 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,8 +72,6 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
@@ -91,13 +89,14 @@
#ifdef DEBUG
/* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
+
+static struct list_head hash_list;
+
/* ahash per-session context */
struct caam_hash_ctx {
struct device *jrdev;
@@ -331,7 +330,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "ahash update shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -349,7 +349,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "ahash update first shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -366,7 +367,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -384,7 +385,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -403,7 +404,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return -ENOMEM;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "ahash digest shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -411,7 +413,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
@@ -420,7 +422,7 @@ static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Digest hash size if it is too large */
-static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
@@ -429,7 +431,7 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dma_addr_t src_dma, dst_dma;
int ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
@@ -464,9 +466,9 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
LDST_SRCDST_BYTE_CONTEXT);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -479,7 +481,8 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
wait_for_completion_interruptible(&result.completion);
ret = result.err;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR,
+ "digested key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in,
digestsize, 1);
#endif
@@ -530,7 +533,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
@@ -542,10 +545,11 @@ static int ahash_setkey(struct crypto_ahash *ahash,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto map_err;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len, 1);
#endif
@@ -556,6 +560,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
DMA_TO_DEVICE);
}
+map_err:
kfree(hashed_key);
return ret;
badkey:
@@ -628,21 +633,18 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
@@ -666,21 +668,18 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
@@ -704,21 +703,18 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
@@ -742,21 +738,18 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
kfree(edesc);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
@@ -852,7 +845,7 @@ static int ahash_update_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -871,9 +864,9 @@ static int ahash_update_ctx(struct ahash_request *req)
*next_buflen = last_buflen;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
@@ -937,7 +930,7 @@ static int ahash_final_ctx(struct ahash_request *req)
digestsize);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -1016,7 +1009,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
digestsize);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -1086,7 +1079,7 @@ static int ahash_digest(struct ahash_request *req)
digestsize);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -1140,7 +1133,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
edesc->src_nents = 0;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -1228,7 +1221,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -1250,9 +1243,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
*next_buflen = 0;
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
@@ -1321,7 +1314,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
digestsize);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -1414,7 +1407,7 @@ static int ahash_update_first(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
@@ -1438,7 +1431,7 @@ static int ahash_update_first(struct ahash_request *req)
sg_copy(next_buf, req->src, req->nbytes);
}
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
@@ -1653,7 +1646,6 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
- struct device *ctrldev;
int alg_type;
int alg_op;
struct ahash_alg ahash_alg;
@@ -1670,7 +1662,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1669,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
int ret = 0;
/*
- * distribute tfms across job rings to ensure in-order
+	 * Get a job ring from the Job Ring driver to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
-
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1722,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_hash_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return;
-
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->hash_list.next)
+ if (!hash_list.next)
return;
- list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -1765,7 +1741,7 @@ static void __exit caam_algapi_hash_exit(void)
}
static struct caam_hash_alg *
-caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+caam_hash_alloc(struct caam_hash_template *template,
bool keyed)
{
struct caam_hash_alg *t_alg;
@@ -1774,7 +1750,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -1805,37 +1781,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg->alg_type = template->alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->hash_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1797,38 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_alg *t_alg;
/* register hmac version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ t_alg = caam_hash_alloc(&driver_hash[i], true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ t_alg = caam_hash_alloc(&driver_hash[i], false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
}
return err;
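
The net effect of the caamhash changes in this file is that every transform now leases its own job ring from the Job Ring driver at cra_init time and returns it at cra_exit time, instead of indexing into the controller's private jrdev[] array. A minimal sketch of that pattern, using only the caam_jr_alloc()/caam_jr_free() interface visible in the hunks above (the context structure and function names are illustrative stand-ins, not the driver's real ones):

	#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
	#include "jr.h"			/* caam_jr_alloc(), caam_jr_free() */

	struct my_tfm_ctx {		/* illustrative stand-in */
		struct device *jrdev;
	};

	static int my_cra_init(struct my_tfm_ctx *ctx)
	{
		/* Lease a job ring; requests queued on it complete in order. */
		ctx->jrdev = caam_jr_alloc();
		if (IS_ERR(ctx->jrdev))
			return PTR_ERR(ctx->jrdev);
		return 0;
	}

	static void my_cra_exit(struct my_tfm_ctx *ctx)
	{
		/* Return the ring once all DMA mappings have been torn down. */
		caam_jr_free(ctx->jrdev);
	}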
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c..8c07d3153f1 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -76,7 +76,7 @@ struct caam_rng_ctx {
struct buf_data bufs[2];
};
-static struct caam_rng_ctx rng_ctx;
+static struct caam_rng_ctx *rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
@@ -103,11 +103,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
bd = (struct buf_data *)((char *)desc -
offsetof(struct buf_data, hw_desc));
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ if (err)
+ caam_jr_strstatus(jrdev, err);
atomic_set(&bd->empty, BUF_NOT_EMPTY);
complete(&bd->filled);
@@ -137,7 +134,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct caam_rng_ctx *ctx = &rng_ctx;
+ struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
@@ -237,12 +234,12 @@ static void caam_cleanup(struct hwrng *rng)
struct buf_data *bd;
for (i = 0; i < 2; i++) {
- bd = &rng_ctx.bufs[i];
+ bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}
- rng_unmap_ctx(&rng_ctx);
+ rng_unmap_ctx(rng_ctx);
}
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
@@ -273,34 +270,26 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
+ caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
}
static int __init caam_rng_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
+ struct device *dev;
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- caam_init_rng(&rng_ctx, priv->jrdev[0]);
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(dev);
+ }
+ rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+ if (!rng_ctx)
+ return -ENOMEM;
+ caam_init_rng(rng_ctx, dev);
- dev_info(priv->jrdev[0], "registering rng-caam\n");
+ dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
}
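
On the caamrng side the context moves from static storage to a heap allocation: the hw_desc buffers embedded in struct buf_data are DMA-mapped and handed to the CAAM, which is the apparent reason the context is obtained with GFP_DMA. The resulting init sequence, condensed from the hunk above:

	dev = caam_jr_alloc();				/* lease a job ring for the RNG */
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);	/* DMA-addressable context */
	if (!rng_ctx)
		return -ENOMEM;
	caam_init_rng(rng_ctx, dev);
	return hwrng_register(&caam_rng);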
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index cf15e781380..f227922cea3 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,10 +23,10 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
-#include <linux/string.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
+#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bf20dd89170..1c38f86bf63 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,133 +5,309 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
-#include "ctrl.h"
-static int caam_remove(struct platform_device *pdev)
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_drv_private_jr *jrpriv;
- struct caam_full __iomem *topregs;
- int ring, ret = 0;
+ u32 *jump_cmd, op_flags;
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ init_job_desc(desc, 0);
- /* shut down JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
- ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
- jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
- irq_dispose_mapping(jrpriv->irq);
+ op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
+ /* INIT RNG in non-test mode */
+ append_operation(desc, op_flags);
+
+ if (!handle && do_sk) {
+ /*
+ * For SH0, Secure Keys must be generated as well
+ */
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+	 * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* Initialize State Handle */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AAI_RNG4_SK);
}
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
- /* Unmap controller region */
- iounmap(&topregs->ctrl);
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
- kfree(ctrlpriv->jrdev);
- kfree(ctrlpriv);
+ /* Uninstantiate State Handle 0 */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
- return ret;
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
/*
- * Descriptor to instantiate RNG State Handle 0 in normal mode and
- * load the JDKEK, TDKEK and TDSK registers
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ * the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ * - -ENODEV if the DECO couldn't be acquired
+ * - -EAGAIN if an error occurred while executing the descriptor
*/
-static void build_instantiation_desc(u32 *desc)
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+ u32 *status)
{
- u32 *jump_cmd;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ unsigned int timeout = 100000;
+ u32 deco_dbg_reg, flags;
+ int i;
- init_job_desc(desc, 0);
+ /* Set the bit to request direct access to DECO0 */
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
- /* INIT RNG in non-test mode */
- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_AS_INIT);
+ while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+ --timeout)
+ cpu_relax();
+
+ if (!timeout) {
+ dev_err(ctrldev, "failed to acquire DECO 0\n");
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ return -ENODEV;
+ }
- /* wait for done */
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
- set_jump_tgt_here(desc, jump_cmd);
+ for (i = 0; i < desc_len(desc); i++)
+ wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+ flags = DECO_JQCR_WHL;
/*
- * load 1 to clear written reg:
- * resets the done interrrupt and returns the RNG to idle.
+	 * If the descriptor length is 4 words or longer, then the
+	 * FOUR bit in the JRCTRL register must be set.
*/
- append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+ if (desc_len(desc) >= 4)
+ flags |= DECO_JQCR_FOUR;
+
+ /* Instruct the DECO to execute it */
+ wr_reg32(&topregs->deco.jr_ctl_hi, flags);
+
+ timeout = 10000000;
+ do {
+ deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ /*
+		 * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+ if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+ DESC_DBG_DECO_STAT_HOST_ERR)
+ break;
+ cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+ *status = rd_reg32(&topregs->deco.op_status_hi) &
+ DECO_OP_STATUS_HI_ERR_MASK;
+
+ /* Mark the DECO as free */
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+
+ if (!timeout)
+ return -EAGAIN;
- /* generate secure keys (non-test) */
- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_RNG4_SK);
+ return 0;
}
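
run_descriptor_deco0() is the primitive that both RNG helpers below are built on; a hedged sketch of a caller (error handling condensed, the descriptor size mirrors the one used by instantiate_rng() below):

	u32 *desc, status;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	build_instantiation_desc(desc, 0, 1);	/* SH0, also generate JDKEK/TDKEK/TDSK */
	ret = run_descriptor_deco0(ctrldev, desc, &status);
	if (ret || status)
		dev_err(ctrldev, "DECO0 run failed: %d (status 0x%x)\n", ret, status);
	kfree(desc);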
-struct instantiate_result {
- struct completion completion;
- int err;
-};
-
-static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *			     by an external entity, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ *	     e.g. there was an RNG hardware error due to not "good enough"
+ *	     entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
{
- struct instantiate_result *instantiation = context;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 *desc, status, rdsta_val;
+ int ret = 0, sh_idx;
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
- dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if ((1 << sh_idx) & state_handle_mask)
+ continue;
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+ * handle (if available), bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ rdsta_val =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ if (status || !(rdsta_val & (1 << sh_idx)))
+ ret = -EAGAIN;
+ if (ret)
+ break;
+
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ /* Clear the contents before recreating the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
- instantiation->err = err;
- complete(&instantiation->completion);
+ kfree(desc);
+
+ return ret;
}
-static int instantiate_rng(struct device *jrdev)
+/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ *			     the RNG4 block: 1 if it's been instantiated by this driver
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
- struct instantiate_result instantiation;
+ u32 *desc, status;
+ int sh_idx, ret = 0;
- dma_addr_t desc_dma;
- u32 *desc;
- int ret;
-
- desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
- if (!desc) {
- dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+		 * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+			 * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret || status) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
}
- build_instantiation_desc(desc);
- desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
- init_completion(&instantiation.completion);
- ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
- if (!ret) {
- wait_for_completion_interruptible(&instantiation.completion);
- ret = instantiation.err;
- if (ret)
- dev_err(jrdev, "unable to instantiate RNG\n");
+ kfree(desc);
+
+ return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_full __iomem *topregs;
+ int ring, ret = 0;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ /* Remove platform devices for JobRs */
+ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+ if (ctrlpriv->jrpdev[ring])
+ of_device_unregister(ctrlpriv->jrpdev[ring]);
}
- dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+ /* De-initialize RNG state handles initialized by this driver. */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
- kfree(desc);
+ /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+ /* Unmap controller region */
+ iounmap(&topregs->ctrl);
+
+ kfree(ctrlpriv->jrpdev);
+ kfree(ctrlpriv);
return ret;
}
/*
- * By default, the TRNG runs for 200 clocks per sample;
- * 1600 clocks per sample generates better entropy.
+ * kick_trng - sets the various parameters for enabling the initialization
+ * of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
-static void kick_trng(struct platform_device *pdev)
+static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -144,46 +320,48 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 1600 clocks per sample */
+
+ /*
+ * Performance-wise, it does not make sense to
+ * set the delay to a value that is lower
+	 * were instantiated properly). Thus, instead of wasting
+ * were instantiated properly. Thus, instead of wasting
+ * time trying to set the values controlling the sample
+ * frequency, the function simply returns.
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay <= val) {
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ return;
+ }
+
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count */
- wr_reg32(&r4tst->rtfrqmin, 400);
- /* max. freq. count */
- wr_reg32(&r4tst->rtfrqmax, 6400);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+ /* max. freq. count, equal to 8 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
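
For concreteness, a worked example of the new frequency-count settings (the ent_delay value is purely illustrative):

	/*
	 * With ent_delay = 3200 system clocks per entropy sample:
	 *   rtfrqmin = ent_delay >> 2 =   800   (1/4 of the sample length)
	 *   rtfrqmax = ent_delay << 3 = 25600   (8x the sample length)
	 */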
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
- * on the SEC_VID register.
- * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
- * @caam_id - the value of the SEC_VID register
+ * on the "sec-era" property in the DTS. This property is updated by u-boot.
**/
-int caam_get_era(u64 caam_id)
+int caam_get_era(void)
{
- struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
- static const struct {
- u16 ip_id;
- u8 maj_rev;
- u8 era;
- } caam_eras[] = {
- {0x0A10, 1, 1},
- {0x0A10, 2, 2},
- {0x0A12, 1, 3},
- {0x0A14, 1, 3},
- {0x0A14, 2, 4},
- {0x0A16, 1, 4},
- {0x0A11, 1, 4}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
- if (caam_eras[i].ip_id == sec_vid->ip_id &&
- caam_eras[i].maj_rev == sec_vid->maj_rev)
- return caam_eras[i].era;
+ struct device_node *caam_node;
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ return prop ? *prop : -ENOTSUPP;
+ }
return -ENOTSUPP;
}
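
A hedged illustration of what the new caam_get_era() relies on (the node name and era value below are made up; only the compatible string and the "fsl,sec-era" property name come from the code above):

	/*
	 * crypto@300000 {
	 *	compatible = "fsl,sec-v4.0";
	 *	fsl,sec-era = <4>;	(typically filled in by u-boot)
	 * };
	 *
	 * caam_get_era() then returns 4; if the node does not carry the
	 * property (or no compatible node exists), it returns -ENOTSUPP.
	 */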
@@ -192,7 +370,7 @@ EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, rspec;
+ int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@@ -202,6 +380,7 @@ static int caam_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
+ u64 cha_vid;
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
@@ -225,7 +404,7 @@ static int caam_probe(struct platform_device *pdev)
topregs = (struct caam_full __iomem *)ctrl;
/* Get the IRQ of the controller (for security violations only) */
- ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+ ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -248,34 +427,33 @@ static int caam_probe(struct platform_device *pdev)
* for all, then go probe each one.
*/
rspec = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
- rspec++;
- if (!rspec) {
- /* for backward compatible with device trees */
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- }
- ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
- if (ctrlpriv->jrdev == NULL) {
+ ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
+ if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
}
ring = 0;
ctrlpriv->total_jobrs = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
- ctrlpriv->total_jobrs++;
- ring++;
- }
- if (!ring) {
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n",
+ ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
- }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -293,29 +471,71 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+
/*
- * RNG4 based SECs (v5+) need special initialization prior
- * to executing any descriptors
+	 * If the SEC has an RNG version >= 4 and the RNG state handle has not
+	 * already been instantiated, do RNG instantiation
*/
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
- kick_trng(pdev);
- ret = instantiate_rng(ctrlpriv->jrdev[0]);
+ if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ do {
+ int inst_handles =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ RDSTA_IFMASK;
+ /*
+			 * If either SH was instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ kick_trng(pdev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+			 * and the kick_trng(...) function will modify the
+			 * upper and lower limits of the entropy sampling
+			 * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
+ dev_err(dev, "failed to instantiate RNG");
caam_remove(pdev);
return ret;
}
+ /*
+ * Set handles init'ed by this module as the complement of the
+ * already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+
+ /* Enable RDB bit so that RNG works faster */
+ setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
}
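
Put together, the probe-time retry loop behaves roughly as follows, assuming no state handle was already instantiated by the boot loader (the RTSDCTL_ENT_DLY_MIN/MAX values of 3200/12800 are used for illustration; regs.h holds the authoritative definitions):

	/*
	 * pass 1: kick_trng(pdev, 3200); instantiate_rng() -> -EAGAIN
	 * pass 2: kick_trng(pdev, 3600); instantiate_rng() -> -EAGAIN
	 *   ...
	 * pass n: kick_trng(pdev, 3200 + 400 * (n - 1)); instantiate_rng() -> 0
	 *
	 * The loop stops on success or once ent_delay reaches
	 * RTSDCTL_ENT_DLY_MAX; in the latter case caam_probe() fails and
	 * caam_remove() tears the controller back down.
	 */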
/* NOTE: RTIC detection ought to go here, around Si time */
- /* Initialize queue allocator lock */
- spin_lock_init(&ctrlpriv->jr_alloc_lock);
-
caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era(caam_id));
+ caam_get_era());
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
@@ -420,7 +640,7 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = __devexit_p(caam_remove),
+ .remove = caam_remove,
};
module_platform_driver(caam_driver);
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h