linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers
@ 2012-07-01 17:19 Eric Bénard
  2012-07-01 17:19 ` [PATCH 1/5] ARM: AT91SAM9G45: add crypto peripherals Eric Bénard
                   ` (5 more replies)
  0 siblings, 6 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

This patch series adds support for the crypto engines available in the
Atmel SAM9G46 http://www.atmel.com/devices/SAM9G46.aspx and SAM9M11
http://www.atmel.com/devices/SAM9M11.aspx

For each driver, the choice between PDC/DMA and PIO transfers was tuned to
maximize performance (benchmarks will soon be available on
the linux4sam wiki http://www.at91.com/).

These drivers were tested on both devices; the last patch adds a few
missing tests to tcrypt.
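
Below is a minimal, hedged sketch of an in-kernel consumer that exercises
the accelerated "cbc(aes)" cipher these drivers register, for anyone who
wants a quick smoke test outside of tcrypt. It uses the generic ablkcipher
API from <linux/crypto.h>; the function and structure names (atmel_aes_demo,
demo_result) are purely illustrative and not part of this series, and the
all-zero key/IV are placeholders.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <crypto/aes.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Completion callback: record the status and wake up the waiter. */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int atmel_aes_demo(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result res;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	u8 *buf;
	int err;

	/* "cbc(aes)" resolves to "atmel-cbc-aes" once the driver is loaded. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* one block, DMA-safe */
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &res);

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Encrypt one block in place and wait for the async completion. */
	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}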

This work was sponsored by Atmel.

Nicolas Royer (5):
  ARM: AT91SAM9G45: add crypto peripherals
  crypto: add Atmel AES driver
  crypto: add Atmel DES/TDES driver
  crypto: add Atmel SHA1/SHA256 driver
  crypto: add new tests to tcrypt

 arch/arm/mach-at91/at91sam9g45.c              |   13 +-
 arch/arm/mach-at91/at91sam9g45_devices.c      |  128 +++
 arch/arm/mach-at91/include/mach/at91sam9g45.h |    2 +
 crypto/tcrypt.c                               |   50 +-
 drivers/crypto/Kconfig                        |   47 +
 drivers/crypto/Makefile                       |    5 +-
 drivers/crypto/atmel-aes-regs.h               |   62 ++
 drivers/crypto/atmel-aes.c                    | 1206 ++++++++++++++++++++++++
 drivers/crypto/atmel-sha-regs.h               |   46 +
 drivers/crypto/atmel-sha.c                    | 1112 ++++++++++++++++++++++
 drivers/crypto/atmel-tdes-regs.h              |   89 ++
 drivers/crypto/atmel-tdes.c                   | 1215 +++++++++++++++++++++++++
 include/linux/platform_data/atmel-aes.h       |   22 +
 13 files changed, 3990 insertions(+), 7 deletions(-)
 create mode 100644 drivers/crypto/atmel-aes-regs.h
 create mode 100644 drivers/crypto/atmel-aes.c
 create mode 100644 drivers/crypto/atmel-sha-regs.h
 create mode 100644 drivers/crypto/atmel-sha.c
 create mode 100644 drivers/crypto/atmel-tdes-regs.h
 create mode 100644 drivers/crypto/atmel-tdes.c
 create mode 100644 include/linux/platform_data/atmel-aes.h

-- 
1.7.7.6


* [PATCH 1/5] ARM: AT91SAM9G45: add crypto peripherals
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
@ 2012-07-01 17:19 ` Eric Bénard
  2012-07-01 17:19 ` [PATCH 2/5] crypto: add Atmel AES driver Eric Bénard
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Nicolas Royer <nicolas@eukrea.com>

Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
---
 arch/arm/mach-at91/at91sam9g45.c              |   13 +++-
 arch/arm/mach-at91/at91sam9g45_devices.c      |  128 +++++++++++++++++++++++++
 arch/arm/mach-at91/include/mach/at91sam9g45.h |    2 +
 include/linux/platform_data/atmel-aes.h       |   22 ++++
 4 files changed, 164 insertions(+), 1 deletions(-)
 create mode 100644 include/linux/platform_data/atmel-aes.h

diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 4792682..da6dc0f 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -182,6 +182,13 @@ static struct clk adc_op_clk = {
 	.rate_hz	= 13200000,
 };
 
+/* AES/TDES/SHA clock - Only for sam9m11/sam9g46 */
+static struct clk aestdessha_clk = {
+	.name		= "aestdessha_clk",
+	.pmc_mask	= 1 << AT91SAM9G45_ID_AESTDESSHA,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+
 static struct clk *periph_clocks[] __initdata = {
 	&pioA_clk,
 	&pioB_clk,
@@ -211,6 +218,7 @@ static struct clk *periph_clocks[] __initdata = {
 	&udphs_clk,
 	&mmc1_clk,
 	&adc_op_clk,
+	&aestdessha_clk,
 	// irq0
 };
 
@@ -231,6 +239,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
 	CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
 	CLKDEV_CON_DEV_ID(NULL, "atmel-trng", &trng_clk),
+	CLKDEV_CON_DEV_ID(NULL, "atmel_sha", &aestdessha_clk),
+	CLKDEV_CON_DEV_ID(NULL, "atmel_tdes", &aestdessha_clk),
+	CLKDEV_CON_DEV_ID(NULL, "atmel_aes", &aestdessha_clk),
 	/* more usart lookup table for DT entries */
 	CLKDEV_CON_DEV_ID("usart", "ffffee00.serial", &mck),
 	CLKDEV_CON_DEV_ID("usart", "fff8c000.serial", &usart0_clk),
@@ -387,7 +398,7 @@ static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = {
 	3,	/* Ethernet */
 	0,	/* Image Sensor Interface */
 	2,	/* USB Device High speed port */
-	0,
+	0,	/* AESTDESSHA Crypto HW Accelerators */
 	0,	/* Multimedia Card Interface 1 */
 	0,
 	0,	/* Advanced Interrupt Controller (IRQ0) */
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 933fc9a..7102f62 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/i2c-gpio.h>
 #include <linux/atmel-mci.h>
+#include <linux/platform_data/atmel-aes.h>
 
 #include <linux/platform_data/at91_adc.h>
 
@@ -1830,6 +1831,130 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
 void __init at91_add_device_serial(void) {}
 #endif
 
+/* --------------------------------------------------------------------
+ *  SHA1/SHA256
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_CRYPTO_DEV_ATMEL_SHA) || defined(CONFIG_CRYPTO_DEV_ATMEL_SHA_MODULE)
+static struct resource sha_resources[] = {
+	{
+		.start	= AT91SAM9G45_BASE_SHA,
+		.end	= AT91SAM9G45_BASE_SHA + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT91SAM9G45_ID_AESTDESSHA,
+		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at91sam9g45_sha_device = {
+	.name	= "atmel_sha",
+	.id		= -1,
+	.resource	= sha_resources,
+	.num_resources	= ARRAY_SIZE(sha_resources),
+};
+
+static void __init at91_add_device_sha(void)
+{
+	platform_device_register(&at91sam9g45_sha_device);
+}
+#else
+static void __init at91_add_device_sha(void) {}
+#endif
+
+/* --------------------------------------------------------------------
+ *  DES/TDES
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_CRYPTO_DEV_ATMEL_TDES) || defined(CONFIG_CRYPTO_DEV_ATMEL_TDES_MODULE)
+static struct resource tdes_resources[] = {
+	[0] = {
+		.start	= AT91SAM9G45_BASE_TDES,
+		.end	= AT91SAM9G45_BASE_TDES + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT91SAM9G45_ID_AESTDESSHA,
+		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at91sam9g45_tdes_device = {
+	.name	= "atmel_tdes",
+	.id		= -1,
+	.resource	= tdes_resources,
+	.num_resources	= ARRAY_SIZE(tdes_resources),
+};
+
+static void __init at91_add_device_tdes(void)
+{
+	platform_device_register(&at91sam9g45_tdes_device);
+}
+#else
+static void __init at91_add_device_tdes(void) {}
+#endif
+
+/* --------------------------------------------------------------------
+ *  AES
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_CRYPTO_DEV_ATMEL_AES) || defined(CONFIG_CRYPTO_DEV_ATMEL_AES_MODULE)
+static struct aes_platform_data aes_data;
+static u64 aes_dmamask = DMA_BIT_MASK(32);
+
+static struct resource aes_resources[] = {
+	[0] = {
+		.start	= AT91SAM9G45_BASE_AES,
+		.end	= AT91SAM9G45_BASE_AES + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT91SAM9G45_ID_AESTDESSHA,
+		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at91sam9g45_aes_device = {
+	.name	= "atmel_aes",
+	.id		= -1,
+	.dev	= {
+		.dma_mask		= &aes_dmamask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= &aes_data,
+	},
+	.resource	= aes_resources,
+	.num_resources	= ARRAY_SIZE(aes_resources),
+};
+
+static void __init at91_add_device_aes(void)
+{
+	struct at_dma_slave	*atslave;
+	struct aes_dma_data	*alt_atslave;
+
+	alt_atslave = kzalloc(sizeof(struct aes_dma_data), GFP_KERNEL);
+
+	/* DMA TX slave channel configuration */
+	atslave = &alt_atslave->txdata;
+	atslave->dma_dev = &at_hdmac_device.dev;
+	atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE	| ATC_SRC_H2SEL_HW |
+						ATC_SRC_PER(AT_DMA_ID_AES_RX);
+
+	/* DMA RX slave channel configuration */
+	atslave = &alt_atslave->rxdata;
+	atslave->dma_dev = &at_hdmac_device.dev;
+	atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE	| ATC_DST_H2SEL_HW |
+						ATC_DST_PER(AT_DMA_ID_AES_TX);
+
+	aes_data.dma_slave = alt_atslave;
+	platform_device_register(&at91sam9g45_aes_device);
+}
+#else
+static void __init at91_add_device_aes(void) {}
+#endif
 
 /* -------------------------------------------------------------------- */
 /*
@@ -1847,6 +1972,9 @@ static int __init at91_add_standard_devices(void)
 	at91_add_device_trng();
 	at91_add_device_watchdog();
 	at91_add_device_tc();
+	at91_add_device_sha();
+	at91_add_device_tdes();
+	at91_add_device_aes();
 	return 0;
 }
 
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
index 3a4da24..8eba102 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -136,6 +136,8 @@
 #define AT_DMA_ID_SSC1_RX	 8
 #define AT_DMA_ID_AC97_TX	 9
 #define AT_DMA_ID_AC97_RX	10
+#define AT_DMA_ID_AES_TX	11
+#define AT_DMA_ID_AES_RX	12
 #define AT_DMA_ID_MCI1		13
 
 #endif
diff --git a/include/linux/platform_data/atmel-aes.h b/include/linux/platform_data/atmel-aes.h
new file mode 100644
index 0000000..e7a1949
--- /dev/null
+++ b/include/linux/platform_data/atmel-aes.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_ATMEL_AES_H
+#define __LINUX_ATMEL_AES_H
+
+#include <mach/at_hdmac.h>
+
+/**
+ * struct aes_dma_data - DMA data for AES
+ */
+struct aes_dma_data {
+	struct at_dma_slave	txdata;
+	struct at_dma_slave	rxdata;
+};
+
+/**
+ * struct aes_platform_data - board-specific AES configuration
+ * @dma_slave: DMA slave interface to use in data transfers.
+ */
+struct aes_platform_data {
+	struct aes_dma_data	*dma_slave;
+};
+
+#endif /* __LINUX_ATMEL_AES_H */
-- 
1.7.7.6


* [PATCH 2/5] crypto: add Atmel AES driver
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
  2012-07-01 17:19 ` [PATCH 1/5] ARM: AT91SAM9G45: add crypto peripherals Eric Bénard
@ 2012-07-01 17:19 ` Eric Bénard
  2012-07-06 12:17   ` Jean-Christophe PLAGNIOL-VILLARD
  2012-07-01 17:19 ` [PATCH 3/5] crypto: add Atmel DES/TDES driver Eric Bénard
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Nicolas Royer <nicolas@eukrea.com>

Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
---
 drivers/crypto/Kconfig          |   17 +
 drivers/crypto/Makefile         |    3 +-
 drivers/crypto/atmel-aes-regs.h |   62 ++
 drivers/crypto/atmel-aes.c      | 1206 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 1287 insertions(+), 1 deletions(-)
 create mode 100644 drivers/crypto/atmel-aes-regs.h
 create mode 100644 drivers/crypto/atmel-aes.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1092a77..1be94e5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -325,4 +325,21 @@ if CRYPTO_DEV_UX500
 	source "drivers/crypto/ux500/Kconfig"
 endif # if CRYPTO_DEV_UX500
 
+config CRYPTO_DEV_ATMEL_AES
+	tristate "Support for Atmel AES hw accelerator"
+	depends on ARCH_AT91
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_AES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	select AT_HDMAC
+	help
+	  Some Atmel processors have an AES hardware accelerator.
+	  Select this if you want to use the Atmel module for
+	  AES algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-aes.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 0139032..7d17b67 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,4 +14,5 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
-obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
new file mode 100644
index 0000000..2786bb1
--- /dev/null
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -0,0 +1,62 @@
+#ifndef __ATMEL_AES_REGS_H__
+#define __ATMEL_AES_REGS_H__
+
+#define AES_CR			0x00
+#define AES_CR_START		(1 << 0)
+#define AES_CR_SWRST		(1 << 8)
+#define AES_CR_LOADSEED		(1 << 16)
+
+#define	AES_MR			0x04
+#define AES_MR_CYPHER_DEC		(0 << 0)
+#define AES_MR_CYPHER_ENC		(1 << 0)
+#define	AES_MR_DUALBUFF			(1 << 3)
+#define AES_MR_PROCDLY_MASK		(0xF << 4)
+#define AES_MR_PROCDLY_OFFSET	4
+#define AES_MR_SMOD_MASK		(0x3 << 8)
+#define AES_MR_SMOD_MANUAL		(0x0 << 8)
+#define AES_MR_SMOD_AUTO		(0x1 << 8)
+#define AES_MR_SMOD_IDATAR0		(0x2 << 8)
+#define	AES_MR_KEYSIZE_MASK		(0x3 << 10)
+#define	AES_MR_KEYSIZE_128		(0x0 << 10)
+#define	AES_MR_KEYSIZE_192		(0x1 << 10)
+#define	AES_MR_KEYSIZE_256		(0x2 << 10)
+#define AES_MR_OPMOD_MASK		(0x7 << 12)
+#define AES_MR_OPMOD_ECB		(0x0 << 12)
+#define AES_MR_OPMOD_CBC		(0x1 << 12)
+#define AES_MR_OPMOD_OFB		(0x2 << 12)
+#define AES_MR_OPMOD_CFB		(0x3 << 12)
+#define AES_MR_OPMOD_CTR		(0x4 << 12)
+#define AES_MR_LOD				(0x1 << 15)
+#define AES_MR_CFBS_MASK		(0x7 << 16)
+#define AES_MR_CFBS_128b		(0x0 << 16)
+#define AES_MR_CFBS_64b			(0x1 << 16)
+#define AES_MR_CFBS_32b			(0x2 << 16)
+#define AES_MR_CFBS_16b			(0x3 << 16)
+#define AES_MR_CFBS_8b			(0x4 << 16)
+#define AES_MR_CKEY_MASK		(0xF << 20)
+#define AES_MR_CKEY_OFFSET		20
+#define AES_MR_CMTYP_MASK		(0x1F << 24)
+#define AES_MR_CMTYP_OFFSET		24
+
+#define	AES_IER		0x10
+#define	AES_IDR		0x14
+#define	AES_IMR		0x18
+#define	AES_ISR		0x1C
+#define AES_INT_DATARDY		(1 << 0)
+#define AES_INT_URAD		(1 << 8)
+#define AES_ISR_URAT_MASK	(0xF << 12)
+#define AES_ISR_URAT_IDR_WR_PROC	(0x0 << 12)
+#define AES_ISR_URAT_ODR_RD_PROC	(0x1 << 12)
+#define AES_ISR_URAT_MR_WR_PROC		(0x2 << 12)
+#define AES_ISR_URAT_ODR_RD_SUBK	(0x3 << 12)
+#define AES_ISR_URAT_MR_WR_SUBK		(0x4 << 12)
+#define AES_ISR_URAT_WOR_RD			(0x5 << 12)
+
+#define AES_KEYWR(x)	(0x20 + ((x) * 0x04))
+#define AES_IDATAR(x)	(0x40 + ((x) * 0x04))
+#define AES_ODATAR(x)	(0x50 + ((x) * 0x04))
+#define AES_IVR(x)		(0x60 + ((x) * 0x04))
+
+#define AES_HW_VERSION	0xFC
+
+#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
new file mode 100644
index 0000000..6bb20ff
--- /dev/null
+++ b/drivers/crypto/atmel-aes.c
@@ -0,0 +1,1206 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL AES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from the omap-aes.c driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/platform_data/atmel-aes.h>
+#include "atmel-aes-regs.h"
+
+#define CFB8_BLOCK_SIZE		1
+#define CFB16_BLOCK_SIZE	2
+#define CFB32_BLOCK_SIZE	4
+#define CFB64_BLOCK_SIZE	8
+
+/* AES flags */
+#define AES_FLAGS_MODE_MASK	0x01ff
+#define AES_FLAGS_ENCRYPT	BIT(0)
+#define AES_FLAGS_CBC		BIT(1)
+#define AES_FLAGS_CFB		BIT(2)
+#define AES_FLAGS_CFB8		BIT(3)
+#define AES_FLAGS_CFB16		BIT(4)
+#define AES_FLAGS_CFB32		BIT(5)
+#define AES_FLAGS_CFB64		BIT(6)
+#define AES_FLAGS_OFB		BIT(7)
+#define AES_FLAGS_CTR		BIT(8)
+
+#define AES_FLAGS_INIT		BIT(16)
+#define AES_FLAGS_DMA		BIT(17)
+#define AES_FLAGS_BUSY		BIT(18)
+
+#define AES_FLAGS_DUALBUFF	BIT(24)
+
+#define ATMEL_AES_QUEUE_LENGTH	1
+#define ATMEL_AES_CACHE_SIZE	0
+
+#define ATMEL_AES_DMA_THRESHOLD		16
+
+
+struct atmel_aes_dev;
+
+struct atmel_aes_ctx {
+	struct atmel_aes_dev *dd;
+
+	int		keylen;
+	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
+struct atmel_aes_reqctx {
+	unsigned long mode;
+};
+
+struct atmel_aes_dma {
+	struct dma_chan			*chan;
+	struct dma_slave_config dma_conf;
+};
+
+struct atmel_aes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+
+	struct atmel_aes_ctx	*ctx;
+	struct device		*dev;
+	struct clk		*iclk;
+	int	irq;
+
+	unsigned long		flags;
+	int	err;
+
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	struct ablkcipher_request	*req;
+	size_t	total;
+
+	struct scatterlist	*in_sg;
+	unsigned int		nb_in_sg;
+
+	struct scatterlist	*out_sg;
+	unsigned int		nb_out_sg;
+
+	size_t	bufcnt;
+
+	u8	buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	int	dma_in;
+	struct atmel_aes_dma	dma_lch_in;
+
+	u8	buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	int	dma_out;
+	struct atmel_aes_dma	dma_lch_out;
+
+	u32	hw_version;
+};
+
+struct atmel_aes_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_aes_drv atmel_aes = {
+	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
+};
+
+static int atmel_aes_sg_length(struct ablkcipher_request *req,
+			struct scatterlist *sg)
+{
+	unsigned int total = req->nbytes;
+	int sg_nb;
+	unsigned int len;
+	struct scatterlist *sg_list;
+
+	sg_nb = 0;
+	sg_list = sg;
+	total = req->nbytes;
+
+	while (total) {
+		len = min(sg_list->length, total);
+
+		sg_nb++;
+		total -= len;
+
+		sg_list = sg_next(sg_list);
+		if (!sg_list)
+			total = 0;
+	}
+
+	return sg_nb;
+}
+
+static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
+{
+	return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_aes_write(struct atmel_aes_dev *dd,
+					u32 offset, u32 value)
+{
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		*value = atmel_aes_read(dd, offset);
+}
+
+static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		atmel_aes_write(dd, offset, *value);
+}
+
+static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
+{
+	atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
+
+	if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
+		dd->flags |= AES_FLAGS_DUALBUFF;
+}
+
+static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
+{
+	struct atmel_aes_dev *aes_dd = NULL;
+	struct atmel_aes_dev *tmp;
+
+	spin_lock_bh(&atmel_aes.lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
+			aes_dd = tmp;
+			break;
+		}
+		ctx->dd = aes_dd;
+	} else {
+		aes_dd = ctx->dd;
+	}
+
+	spin_unlock_bh(&atmel_aes.lock);
+
+	return aes_dd;
+}
+
+static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
+{
+	clk_prepare_enable(dd->iclk);
+
+	if (!(dd->flags & AES_FLAGS_INIT)) {
+		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+		atmel_aes_dualbuff_test(dd);
+		dd->flags |= AES_FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
+{
+	atmel_aes_hw_init(dd);
+
+	dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+
+	clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
+{
+	struct ablkcipher_request *req = dd->req;
+
+	clk_disable_unprepare(dd->iclk);
+	dd->flags &= ~AES_FLAGS_BUSY;
+
+	req->base.complete(&req->base, err);
+}
+
+static void atmel_aes_dma_callback(void *data)
+{
+	struct atmel_aes_dev *dd = data;
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
+}
+
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+{
+	struct dma_async_tx_descriptor	*in_desc, *out_desc;
+	int nb_dma_sg_in, nb_dma_sg_out;
+
+	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
+	if (!dd->nb_in_sg)
+		goto exit_err;
+
+	nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
+			DMA_TO_DEVICE);
+	if (!nb_dma_sg_in)
+		goto exit_err;
+
+	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
+				nb_dma_sg_in, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
+
+	if (!in_desc)
+		goto unmap_in;
+
+	/* callback not needed */
+
+	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
+	if (!dd->nb_out_sg)
+		goto unmap_in;
+
+	nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
+			DMA_FROM_DEVICE);
+	if (!nb_dma_sg_out)
+		goto unmap_out;
+
+	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
+				nb_dma_sg_out, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (!out_desc)
+		goto unmap_out;
+
+	out_desc->callback = atmel_aes_dma_callback;
+	out_desc->callback_param = dd;
+
+	dd->total -= dd->req->nbytes;
+
+	dmaengine_submit(out_desc);
+	dma_async_issue_pending(dd->dma_lch_out.chan);
+
+	dmaengine_submit(in_desc);
+	dma_async_issue_pending(dd->dma_lch_in.chan);
+
+	return 0;
+
+unmap_out:
+	dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
+		DMA_FROM_DEVICE);
+unmap_in:
+	dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
+		DMA_TO_DEVICE);
+exit_err:
+	return -EINVAL;
+}
+
+static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
+{
+	dd->flags &= ~AES_FLAGS_DMA;
+
+	/* use cache buffers */
+	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
+	if (!dd->nb_in_sg)
+		return -EINVAL;
+
+	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
+	if (!dd->nb_out_sg)
+		return -EINVAL;
+
+	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
+					dd->buf_in, dd->total);
+
+	if (!dd->bufcnt)
+		return -EINVAL;
+
+	dd->total -= dd->bufcnt;
+
+	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
+				dd->bufcnt >> 2);
+
+	return 0;
+}
+
+static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
+{
+	int err;
+
+	if (dd->flags & AES_FLAGS_CFB8) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else if (dd->flags & AES_FLAGS_CFB16) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+	} else {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+	}
+
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+	dd->flags |= AES_FLAGS_DMA;
+	err = atmel_aes_crypt_dma(dd);
+
+	return err;
+}
+
+static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
+{
+	int err;
+	u32 valcr = 0, valmr = 0;
+
+	err = atmel_aes_hw_init(dd);
+
+	if (err)
+		return err;
+
+	/* MR register must be set before IV registers */
+	if (dd->ctx->keylen == AES_KEYSIZE_128)
+		valmr |= AES_MR_KEYSIZE_128;
+	else if (dd->ctx->keylen == AES_KEYSIZE_192)
+		valmr |= AES_MR_KEYSIZE_192;
+	else
+		valmr |= AES_MR_KEYSIZE_256;
+
+	if (dd->flags & AES_FLAGS_CBC) {
+		valmr |= AES_MR_OPMOD_CBC;
+	} else if (dd->flags & AES_FLAGS_CFB) {
+		valmr |= AES_MR_OPMOD_CFB;
+		if (dd->flags & AES_FLAGS_CFB8)
+			valmr |= AES_MR_CFBS_8b;
+		else if (dd->flags & AES_FLAGS_CFB16)
+			valmr |= AES_MR_CFBS_16b;
+		else if (dd->flags & AES_FLAGS_CFB32)
+			valmr |= AES_MR_CFBS_32b;
+		else if (dd->flags & AES_FLAGS_CFB64)
+			valmr |= AES_MR_CFBS_64b;
+	} else if (dd->flags & AES_FLAGS_OFB) {
+		valmr |= AES_MR_OPMOD_OFB;
+	} else if (dd->flags & AES_FLAGS_CTR) {
+		valmr |= AES_MR_OPMOD_CTR;
+	} else {
+		valmr |= AES_MR_OPMOD_ECB;
+	}
+
+	if (dd->flags & AES_FLAGS_ENCRYPT)
+		valmr |= AES_MR_CYPHER_ENC;
+
+	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
+		valmr |= AES_MR_SMOD_IDATAR0;
+		if (dd->flags & AES_FLAGS_DUALBUFF)
+			valmr |= AES_MR_DUALBUFF;
+	} else {
+		valmr |= AES_MR_SMOD_AUTO;
+	}
+
+	atmel_aes_write(dd, AES_CR, valcr);
+	atmel_aes_write(dd, AES_MR, valmr);
+
+	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
+						dd->ctx->keylen >> 2);
+
+	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
+	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
+	   dd->req->info) {
+		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
+	}
+
+	return 0;
+}
+
+static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
+			       struct ablkcipher_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct atmel_aes_ctx *ctx;
+	struct atmel_aes_reqctx *rctx;
+	unsigned long flags;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & AES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= AES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ablkcipher_request_cast(async_req);
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->in_sg = req->src;
+	dd->out_sg = req->dst;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= AES_FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
+	dd->ctx = ctx;
+	ctx->dd = dd;
+
+	err = atmel_aes_write_ctrl(dd);
+	if (!err) {
+		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
+			err = atmel_aes_crypt_dma_start(dd);
+		else
+			err = atmel_aes_crypt_cpu_start(dd);
+	}
+	if (err) {
+		/* the done tasklet will not finish it, so do it here */
+		atmel_aes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
+	}
+
+	return ret;
+}
+
+static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
+{
+	int err = -EINVAL;
+
+	if (dd->flags & AES_FLAGS_DMA) {
+		dma_unmap_sg(dd->dev, dd->out_sg,
+			dd->nb_out_sg, DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg,
+			dd->nb_in_sg, DMA_TO_DEVICE);
+		err = 0;
+	}
+
+	return err;
+}
+
+static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct atmel_aes_dev *dd;
+
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		pr_err("request size is not an exact number of AES blocks\n");
+		return -EINVAL;
+	}
+
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return atmel_aes_handle_queue(dd, req);
+}
+
+static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+{
+	int err = -ENOMEM;
+	struct aes_platform_data	*pdata;
+	dma_cap_mask_t mask_in, mask_out;
+
+	pdata = dd->dev->platform_data;
+
+	if (pdata && pdata->dma_slave->txdata.dma_dev &&
+		pdata->dma_slave->rxdata.dma_dev) {
+
+		/* Try to grab 2 DMA channels */
+		dma_cap_zero(mask_in);
+		dma_cap_set(DMA_SLAVE, mask_in);
+
+		dd->dma_lch_in.chan = dma_request_channel(mask_in,
+				atmel_aes_filter, &pdata->dma_slave->rxdata);
+		if (!dd->dma_lch_in.chan)
+			goto err_dma_in;
+
+		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+			AES_IDATAR(0);
+		dd->dma_lch_in.dma_conf.src_maxburst = 1;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_in.dma_conf.device_fc = false;
+
+		dma_cap_zero(mask_out);
+		dma_cap_set(DMA_SLAVE, mask_out);
+		dd->dma_lch_out.chan = dma_request_channel(mask_out,
+				atmel_aes_filter, &pdata->dma_slave->txdata);
+		if (!dd->dma_lch_out.chan)
+			goto err_dma_out;
+
+		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+			AES_ODATAR(0);
+		dd->dma_lch_out.dma_conf.src_maxburst = 1;
+		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_out.dma_conf.device_fc = false;
+
+		return 0;
+	} else {
+		return -ENODEV;
+	}
+
+err_dma_out:
+	dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+	return err;
+}
+
+static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_release_channel(dd->dma_lch_in.chan);
+	dma_release_channel(dd->dma_lch_out.chan);
+}
+
+static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+		   keylen != AES_KEYSIZE_256) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		0);
+}
+
+static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
+}
+
+static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CBC);
+}
+
+static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
+}
+
+static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_OFB);
+}
+
+static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+}
+
+static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CFB);
+}
+
+static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
+}
+
+static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CFB | AES_FLAGS_CFB64);
+}
+
+static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
+}
+
+static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CFB | AES_FLAGS_CFB32);
+}
+
+static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
+}
+
+static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CFB | AES_FLAGS_CFB16);
+}
+
+static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT |	AES_FLAGS_CFB | AES_FLAGS_CFB8);
+}
+
+static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CFB | AES_FLAGS_CFB8);
+}
+
+static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
+}
+
+static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req,
+		AES_FLAGS_CTR);
+}
+
+static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+
+	return 0;
+}
+
+static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "atmel-ecb-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ecb_encrypt,
+		.decrypt	= atmel_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "atmel-cbc-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cbc_encrypt,
+		.decrypt	= atmel_aes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(aes)",
+	.cra_driver_name	= "atmel-ofb-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ofb_encrypt,
+		.decrypt	= atmel_aes_ofb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(aes)",
+	.cra_driver_name	= "atmel-cfb-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb_encrypt,
+		.decrypt	= atmel_aes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(aes)",
+	.cra_driver_name	= "atmel-cfb32-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb32_encrypt,
+		.decrypt	= atmel_aes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(aes)",
+	.cra_driver_name	= "atmel-cfb16-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb16_encrypt,
+		.decrypt	= atmel_aes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(aes)",
+	.cra_driver_name	= "atmel-cfb8-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb8_encrypt,
+		.decrypt	= atmel_aes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "atmel-ctr-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ctr_encrypt,
+		.decrypt	= atmel_aes_ctr_decrypt,
+	}
+},
+};
+
+static struct crypto_alg aes_cfb64_alg[] = {
+{
+	.cra_name		= "cfb64(aes)",
+	.cra_driver_name	= "atmel-cfb64-aes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB64_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb64_encrypt,
+		.decrypt	= atmel_aes_cfb64_decrypt,
+	}
+},
+};
+
+static void atmel_aes_queue_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+
+	atmel_aes_handle_queue(dd, NULL);
+}
+
+static void atmel_aes_done_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
+	int err;
+
+	if (!(dd->flags & AES_FLAGS_DMA)) {
+		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
+				dd->bufcnt >> 2);
+
+		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
+			dd->buf_out, dd->bufcnt))
+			err = 0;
+		else
+			err = -EINVAL;
+
+		goto cpu_end;
+	}
+
+	err = atmel_aes_crypt_dma_stop(dd);
+
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = atmel_aes_crypt_dma_start(dd);
+		if (!err)
+			return; /* DMA started. Not finishing. */
+	}
+
+cpu_end:
+	atmel_aes_finish_req(dd, err);
+	atmel_aes_handle_queue(dd, NULL);
+}
+
+static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
+{
+	struct atmel_aes_dev *aes_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_aes_read(aes_dd, AES_ISR);
+	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
+		atmel_aes_write(aes_dd, AES_IDR, reg);
+		if (AES_FLAGS_BUSY & aes_dd->flags)
+			tasklet_schedule(&aes_dd->done_task);
+		else
+			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_alg(&aes_algs[i]);
+	if (dd->hw_version >= 0x130)
+		crypto_unregister_alg(&aes_cfb64_alg[0]);
+}
+
+static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		INIT_LIST_HEAD(&aes_algs[i].cra_list);
+		err = crypto_register_alg(&aes_algs[i]);
+		if (err)
+			goto err_aes_algs;
+	}
+
+	atmel_aes_hw_version_init(dd);
+
+	if (dd->hw_version >= 0x130) {
+		INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
+		err = crypto_register_alg(&aes_cfb64_alg[0]);
+		if (err)
+			goto err_aes_cfb64_alg;
+	}
+
+	return 0;
+
+err_aes_cfb64_alg:
+	i = ARRAY_SIZE(aes_algs);
+err_aes_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&aes_algs[j]);
+
+	return err;
+}
+
+static int __devinit atmel_aes_probe(struct platform_device *pdev)
+{
+	struct atmel_aes_dev *aes_dd;
+	struct aes_platform_data	*pdata;
+	struct device *dev = &pdev->dev;
+	struct resource *aes_res;
+	unsigned long aes_phys_size;
+	int err;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		err = -ENXIO;
+		goto aes_dd_err;
+	}
+
+	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
+	if (aes_dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		err = -ENOMEM;
+		goto aes_dd_err;
+	}
+
+	aes_dd->dev = dev;
+
+	platform_set_drvdata(pdev, aes_dd);
+
+	INIT_LIST_HEAD(&aes_dd->list);
+
+	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
+					(unsigned long)aes_dd);
+	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
+					(unsigned long)aes_dd);
+
+	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
+
+	aes_dd->irq = -1;
+
+	/* Get the base address */
+	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!aes_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	aes_dd->phys_base = aes_res->start;
+	aes_phys_size = resource_size(aes_res);
+
+	/* Get the IRQ */
+	aes_dd->irq = platform_get_irq(pdev,  0);
+	if (aes_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = aes_dd->irq;
+		goto aes_irq_err;
+	}
+
+	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
+						aes_dd);
+	if (err) {
+		dev_err(dev, "unable to request aes irq.\n");
+		goto aes_irq_err;
+	}
+
+	/* Initializing the clock */
+	aes_dd->iclk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(aes_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(aes_dd->iclk);
+		goto clk_err;
+	}
+
+	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
+	if (!aes_dd->io_base) {
+		dev_err(dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto aes_io_err;
+	}
+
+	err = atmel_aes_dma_init(aes_dd);
+	if (err)
+		goto err_aes_dma;
+
+	spin_lock(&atmel_aes.lock);
+	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
+	spin_unlock(&atmel_aes.lock);
+
+	err = atmel_aes_register_algs(aes_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel AES\n");
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_aes.lock);
+	list_del(&aes_dd->list);
+	spin_unlock(&atmel_aes.lock);
+	atmel_aes_dma_cleanup(aes_dd);
+err_aes_dma:
+	iounmap(aes_dd->io_base);
+aes_io_err:
+	clk_put(aes_dd->iclk);
+clk_err:
+	free_irq(aes_dd->irq, aes_dd);
+aes_irq_err:
+res_err:
+	tasklet_kill(&aes_dd->done_task);
+	tasklet_kill(&aes_dd->queue_task);
+	kfree(aes_dd);
+	aes_dd = NULL;
+aes_dd_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int __devexit atmel_aes_remove(struct platform_device *pdev)
+{
+	struct atmel_aes_dev *aes_dd;
+
+	aes_dd = platform_get_drvdata(pdev);
+	if (!aes_dd)
+		return -ENODEV;
+	spin_lock(&atmel_aes.lock);
+	list_del(&aes_dd->list);
+	spin_unlock(&atmel_aes.lock);
+
+	atmel_aes_unregister_algs(aes_dd);
+
+	tasklet_kill(&aes_dd->done_task);
+	tasklet_kill(&aes_dd->queue_task);
+
+	atmel_aes_dma_cleanup(aes_dd);
+
+	iounmap(aes_dd->io_base);
+
+	clk_put(aes_dd->iclk);
+
+	if (aes_dd->irq > 0)
+		free_irq(aes_dd->irq, aes_dd);
+
+	kfree(aes_dd);
+	aes_dd = NULL;
+
+	return 0;
+}
+
+static struct platform_driver atmel_aes_driver = {
+	.probe		= atmel_aes_probe,
+	.remove		= __devexit_p(atmel_aes_remove),
+	.driver		= {
+		.name	= "atmel_aes",
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(atmel_aes_driver);
+
+MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
-- 
1.7.7.6


* [PATCH 3/5] crypto: add Atmel DES/TDES driver
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
  2012-07-01 17:19 ` [PATCH 1/5] ARM: AT91SAM9G45: add crypto peripherals Eric Bénard
  2012-07-01 17:19 ` [PATCH 2/5] crypto: add Atmel AES driver Eric Bénard
@ 2012-07-01 17:19 ` Eric Bénard
  2012-07-01 17:19 ` [PATCH 4/5] crypto: add Atmel SHA1/SHA256 driver Eric Bénard
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Nicolas Royer <nicolas@eukrea.com>

Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
---
 drivers/crypto/Kconfig           |   16 +
 drivers/crypto/Makefile          |    1 +
 drivers/crypto/atmel-tdes-regs.h |   89 +++
 drivers/crypto/atmel-tdes.c      | 1215 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 1321 insertions(+), 0 deletions(-)
 create mode 100644 drivers/crypto/atmel-tdes-regs.h
 create mode 100644 drivers/crypto/atmel-tdes.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1be94e5..9ac7128 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -342,4 +342,20 @@ config CRYPTO_DEV_ATMEL_AES
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-aes.
 
+config CRYPTO_DEV_ATMEL_TDES
+	tristate "Support for Atmel DES/TDES hw accelerator"
+	depends on ARCH_AT91
+	select CRYPTO_DES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Atmel processors have a DES/TDES hardware accelerator.
+	  Select this if you want to use the Atmel module for
+	  DES/TDES algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-tdes.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7d17b67..211fdc2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
new file mode 100644
index 0000000..5ac2a90
--- /dev/null
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -0,0 +1,89 @@
+#ifndef __ATMEL_TDES_REGS_H__
+#define __ATMEL_TDES_REGS_H__
+
+#define TDES_CR			0x00
+#define TDES_CR_START			(1 << 0)
+#define TDES_CR_SWRST			(1 << 8)
+#define TDES_CR_LOADSEED		(1 << 16)
+
+#define	TDES_MR			0x04
+#define TDES_MR_CYPHER_DEC		(0 << 0)
+#define TDES_MR_CYPHER_ENC		(1 << 0)
+#define TDES_MR_TDESMOD_MASK	(0x3 << 1)
+#define TDES_MR_TDESMOD_DES		(0x0 << 1)
+#define TDES_MR_TDESMOD_TDES	(0x1 << 1)
+#define TDES_MR_TDESMOD_XTEA	(0x2 << 1)
+#define TDES_MR_KEYMOD_3KEY		(0 << 4)
+#define TDES_MR_KEYMOD_2KEY		(1 << 4)
+#define TDES_MR_SMOD_MASK		(0x3 << 8)
+#define TDES_MR_SMOD_MANUAL		(0x0 << 8)
+#define TDES_MR_SMOD_AUTO		(0x1 << 8)
+#define TDES_MR_SMOD_PDC		(0x2 << 8)
+#define TDES_MR_OPMOD_MASK		(0x3 << 12)
+#define TDES_MR_OPMOD_ECB		(0x0 << 12)
+#define TDES_MR_OPMOD_CBC		(0x1 << 12)
+#define TDES_MR_OPMOD_OFB		(0x2 << 12)
+#define TDES_MR_OPMOD_CFB		(0x3 << 12)
+#define TDES_MR_LOD				(0x1 << 15)
+#define TDES_MR_CFBS_MASK		(0x3 << 16)
+#define TDES_MR_CFBS_64b		(0x0 << 16)
+#define TDES_MR_CFBS_32b		(0x1 << 16)
+#define TDES_MR_CFBS_16b		(0x2 << 16)
+#define TDES_MR_CFBS_8b			(0x3 << 16)
+#define TDES_MR_CKEY_MASK		(0xF << 20)
+#define TDES_MR_CKEY_OFFSET		20
+#define TDES_MR_CTYPE_MASK		(0x3F << 24)
+#define TDES_MR_CTYPE_OFFSET	24
+
+#define	TDES_IER		0x10
+#define	TDES_IDR		0x14
+#define	TDES_IMR		0x18
+#define	TDES_ISR		0x1C
+#define TDES_INT_DATARDY		(1 << 0)
+#define TDES_INT_ENDRX			(1 << 1)
+#define TDES_INT_ENDTX			(1 << 2)
+#define TDES_INT_RXBUFF			(1 << 3)
+#define TDES_INT_TXBUFE			(1 << 4)
+#define TDES_INT_URAD			(1 << 8)
+#define TDES_ISR_URAT_MASK		(0x3 << 12)
+#define TDES_ISR_URAT_IDR		(0x0 << 12)
+#define TDES_ISR_URAT_ODR		(0x1 << 12)
+#define TDES_ISR_URAT_MR		(0x2 << 12)
+#define TDES_ISR_URAT_WO		(0x3 << 12)
+
+
+#define	TDES_KEY1W1R	0x20
+#define	TDES_KEY1W2R	0x24
+#define	TDES_KEY2W1R	0x28
+#define	TDES_KEY2W2R	0x2C
+#define	TDES_KEY3W1R	0x30
+#define	TDES_KEY3W2R	0x34
+#define	TDES_IDATA1R	0x40
+#define	TDES_IDATA2R	0x44
+#define	TDES_ODATA1R	0x50
+#define	TDES_ODATA2R	0x54
+#define	TDES_IV1R		0x60
+#define	TDES_IV2R		0x64
+
+#define	TDES_XTEARNDR	0x70
+#define	TDES_XTEARNDR_XTEA_RNDS_MASK	(0x3F << 0)
+#define	TDES_XTEARNDR_XTEA_RNDS_OFFSET	0
+
+#define TDES_RPR		0x100
+#define TDES_RCR		0x104
+#define TDES_TPR		0x108
+#define TDES_TCR		0x10C
+#define TDES_RNPR		0x110
+#define TDES_RNCR		0x114
+#define TDES_TNPR		0x118
+#define TDES_TNCR		0x11C
+#define TDES_PTCR		0x120
+#define TDES_PTCR_RXTEN			(1 << 0)
+#define TDES_PTCR_RXTDIS		(1 << 1)
+#define TDES_PTCR_TXTEN			(1 << 8)
+#define TDES_PTCR_TXTDIS		(1 << 9)
+#define TDES_PTSR		0x124
+#define TDES_PTSR_RXTEN			(1 << 0)
+#define TDES_PTSR_TXTEN			(1 << 8)
+
+#endif /* __ATMEL_TDES_REGS_H__ */
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
new file mode 100644
index 0000000..eb2b61e
--- /dev/null
+++ b/drivers/crypto/atmel-tdes.c
@@ -0,0 +1,1215 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL DES/TDES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from the omap-aes.c driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include "atmel-tdes-regs.h"
+
+/* TDES flags  */
+#define TDES_FLAGS_MODE_MASK		0x007f
+#define TDES_FLAGS_ENCRYPT	BIT(0)
+#define TDES_FLAGS_CBC		BIT(1)
+#define TDES_FLAGS_CFB		BIT(2)
+#define TDES_FLAGS_CFB8		BIT(3)
+#define TDES_FLAGS_CFB16	BIT(4)
+#define TDES_FLAGS_CFB32	BIT(5)
+#define TDES_FLAGS_OFB		BIT(6)
+
+#define TDES_FLAGS_INIT		BIT(16)
+#define TDES_FLAGS_FAST		BIT(17)
+#define TDES_FLAGS_BUSY		BIT(18)
+
+#define ATMEL_TDES_QUEUE_LENGTH	1
+
+#define CFB8_BLOCK_SIZE		1
+#define CFB16_BLOCK_SIZE	2
+#define CFB32_BLOCK_SIZE	4
+#define CFB64_BLOCK_SIZE	8
+
+
+struct atmel_tdes_dev;
+
+struct atmel_tdes_ctx {
+	struct atmel_tdes_dev *dd;
+
+	int		keylen;
+	u32		key[3*DES_KEY_SIZE / sizeof(u32)];
+	unsigned long	flags;
+};
+
+struct atmel_tdes_reqctx {
+	unsigned long mode;
+};
+
+struct atmel_tdes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+
+	struct atmel_tdes_ctx	*ctx;
+	struct device		*dev;
+	struct clk			*iclk;
+	int					irq;
+
+	unsigned long		flags;
+	int			err;
+
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	struct ablkcipher_request	*req;
+	size_t				total;
+
+	struct scatterlist	*in_sg;
+	size_t				in_offset;
+	struct scatterlist	*out_sg;
+	size_t				out_offset;
+
+	size_t	buflen;
+	size_t	dma_size;
+
+	void	*buf_in;
+	int		dma_in;
+	dma_addr_t	dma_addr_in;
+
+	void	*buf_out;
+	int		dma_out;
+	dma_addr_t	dma_addr_out;
+};
+
+struct atmel_tdes_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_tdes_drv atmel_tdes = {
+	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
+};
+
+static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
+			void *buf, size_t buflen, size_t total, int out)
+{
+	unsigned int count, off = 0;
+
+	while (buflen && total) {
+		count = min((*sg)->length - *offset, total);
+		count = min(count, buflen);
+
+		if (!count)
+			return off;
+
+		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+		off += count;
+		buflen -= count;
+		*offset += count;
+		total -= count;
+
+		if (*offset == (*sg)->length) {
+			*sg = sg_next(*sg);
+			if (*sg)
+				*offset = 0;
+			else
+				total = 0;
+		}
+	}
+
+	return off;
+}
+
+static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
+{
+	return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
+					u32 offset, u32 value)
+{
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		atmel_tdes_write(dd, offset, *value);
+}
+
+static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+{
+	struct atmel_tdes_dev *tdes_dd = NULL;
+	struct atmel_tdes_dev *tmp;
+
+	spin_lock_bh(&atmel_tdes.lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
+			tdes_dd = tmp;
+			break;
+		}
+		ctx->dd = tdes_dd;
+	} else {
+		tdes_dd = ctx->dd;
+	}
+	spin_unlock_bh(&atmel_tdes.lock);
+
+	return tdes_dd;
+}
+
+static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
+{
+	clk_prepare_enable(dd->iclk);
+
+	if (!(dd->flags & TDES_FLAGS_INIT)) {
+		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
+		dd->flags |= TDES_FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
+{
+	int err;
+	u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+
+	err = atmel_tdes_hw_init(dd);
+
+	if (err)
+		return err;
+
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+
+	/* MR register must be set before IV registers */
+	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
+		valmr |= TDES_MR_KEYMOD_3KEY;
+		valmr |= TDES_MR_TDESMOD_TDES;
+	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
+		valmr |= TDES_MR_KEYMOD_2KEY;
+		valmr |= TDES_MR_TDESMOD_TDES;
+	} else {
+		valmr |= TDES_MR_TDESMOD_DES;
+	}
+
+	if (dd->flags & TDES_FLAGS_CBC) {
+		valmr |= TDES_MR_OPMOD_CBC;
+	} else if (dd->flags & TDES_FLAGS_CFB) {
+		valmr |= TDES_MR_OPMOD_CFB;
+
+		if (dd->flags & TDES_FLAGS_CFB8)
+			valmr |= TDES_MR_CFBS_8b;
+		else if (dd->flags & TDES_FLAGS_CFB16)
+			valmr |= TDES_MR_CFBS_16b;
+		else if (dd->flags & TDES_FLAGS_CFB32)
+			valmr |= TDES_MR_CFBS_32b;
+	} else if (dd->flags & TDES_FLAGS_OFB) {
+		valmr |= TDES_MR_OPMOD_OFB;
+	}
+
+	if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
+		valmr |= TDES_MR_CYPHER_ENC;
+
+	atmel_tdes_write(dd, TDES_CR, valcr);
+	atmel_tdes_write(dd, TDES_MR, valmr);
+
+	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
+						dd->ctx->keylen >> 2);
+
+	if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
+		(dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
+		atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+	}
+
+	return 0;
+}
+
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+	int err = 0;
+	size_t count;
+
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+
+	if (dd->flags & TDES_FLAGS_FAST) {
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+	} else {
+		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+					   dd->dma_size, DMA_FROM_DEVICE);
+
+		/* copy data */
+		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+				dd->buf_out, dd->buflen, dd->dma_size, 1);
+		if (count != dd->dma_size) {
+			err = -EINVAL;
+			pr_err("not all data converted: %u\n", count);
+		}
+	}
+
+	return err;
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
+{
+	int err = -ENOMEM;
+
+	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buflen = PAGE_SIZE;
+	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
+
+	if (!dd->buf_in || !dd->buf_out) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		goto err_alloc;
+	}
+
+	/* MAP here */
+	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+					dd->buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_in;
+	}
+
+	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+					dd->buflen, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_out;
+	}
+
+	return 0;
+
+err_map_out:
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+err_map_in:
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+err_alloc:
+	if (err)
+		pr_err("error: %d\n", err);
+	return err;
+}
+
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+}
+
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+			       dma_addr_t dma_addr_out, int length)
+{
+	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct atmel_tdes_dev *dd = ctx->dd;
+	int len32;
+
+	dd->dma_size = length;
+
+	if (!(dd->flags & TDES_FLAGS_FAST)) {
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+					   DMA_TO_DEVICE);
+	}
+
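+	/*
+	 * The PDC counters below are programmed with a transfer count, not
+	 * a byte count: bytes for CFB8, half-words for CFB16 and 32-bit
+	 * words otherwise.
+	 */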
+	if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+		len32 = DIV_ROUND_UP(length, sizeof(u8));
+	else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+		len32 = DIV_ROUND_UP(length, sizeof(u16));
+	else
+		len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
+	atmel_tdes_write(dd, TDES_TCR, len32);
+	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
+	atmel_tdes_write(dd, TDES_RCR, len32);
+
+	/* Enable Interrupt */
+	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
+
+	/* Start DMA transfer */
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
+
+	return 0;
+}
+
+static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+					crypto_ablkcipher_reqtfm(dd->req));
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
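+	/*
+	 * Fast path: single, word-aligned src and dst scatterlist entries
+	 * are DMA-mapped directly.  Otherwise the data is bounced through
+	 * the pre-mapped buf_in/buf_out pages set up in
+	 * atmel_tdes_dma_init().
+	 */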
+	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
+		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
+
+		fast = in && out;
+	}
+
+	if (fast)  {
+		count = min(dd->total, sg_dma_len(dd->in_sg));
+		count = min(count, sg_dma_len(dd->out_sg));
+
+		if (count != dd->total) {
+			pr_err("request length != buffer length\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, 1,
+				DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, 1,
+				DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(dd->in_sg);
+		addr_out = sg_dma_address(dd->out_sg);
+
+		dd->flags |= TDES_FLAGS_FAST;
+
+	} else {
+		/* use cache buffers */
+		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
+				dd->buf_in, dd->buflen, dd->total, 0);
+
+		addr_in = dd->dma_addr_in;
+		addr_out = dd->dma_addr_out;
+
+		dd->flags &= ~TDES_FLAGS_FAST;
+
+	}
+
+	dd->total -= count;
+
+	err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+	if (err) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+	}
+
+	return err;
+}
+
+
+static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
+{
+	struct ablkcipher_request *req = dd->req;
+
+	clk_disable_unprepare(dd->iclk);
+
+	dd->flags &= ~TDES_FLAGS_BUSY;
+
+	req->base.complete(&req->base, err);
+}
+
+static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
+			       struct ablkcipher_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct atmel_tdes_ctx *ctx;
+	struct atmel_tdes_reqctx *rctx;
+	unsigned long flags;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & TDES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= TDES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ablkcipher_request_cast(async_req);
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->in_offset = 0;
+	dd->in_sg = req->src;
+	dd->out_offset = 0;
+	dd->out_sg = req->dst;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= TDES_FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
+	dd->ctx = ctx;
+	ctx->dd = dd;
+
+	err = atmel_tdes_write_ctrl(dd);
+	if (!err)
+		err = atmel_tdes_crypt_dma_start(dd);
+	if (err) {
+		/* done_task will not finish it, so do it here */
+		atmel_tdes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
+	}
+
+	return ret;
+}
+
+
+static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct atmel_tdes_dev *dd;
+
+	if (mode & TDES_FLAGS_CFB8) {
+		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB8 block size\n");
+			return -EINVAL;
+		}
+	} else if (mode & TDES_FLAGS_CFB16) {
+		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB16 block size\n");
+			return -EINVAL;
+		}
+	} else if (mode & TDES_FLAGS_CFB32) {
+		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB32 block size\n");
+			return -EINVAL;
+		}
+	} else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+		pr_err("request size is not a multiple of the DES block size\n");
+		return -EINVAL;
+	}
+
+	dd = atmel_tdes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return atmel_tdes_handle_queue(dd, req);
+}
+
+static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	u32 tmp[DES_EXPKEY_WORDS];
+	int err;
+	struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (keylen != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	err = des_ekey(tmp, key);
+	if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	const char *alg_name;
+
+	alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+
+	/*
+	 * HW bug in cfb 3-keys mode.
+	 */
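+	/*
+	 * Because of this, the cfb/cfb8/cfb16/cfb32 des3_ede algorithms
+	 * below only advertise 2*DES_KEY_SIZE for both min_keysize and
+	 * max_keysize, while ecb/cbc/ofb also accept 3-key EDE keys.
+	 */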
+	if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+}
+
+static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, 0);
+}
+
+static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+}
+
+static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
+}
+static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+
+	return 0;
+}
+
+static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg tdes_algs[] = {
+{
+	.cra_name		= "ecb(des)",
+	.cra_driver_name	= "atmel-ecb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_ecb_encrypt,
+		.decrypt	= atmel_tdes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des)",
+	.cra_driver_name	= "atmel-cbc-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cbc_encrypt,
+		.decrypt	= atmel_tdes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(des)",
+	.cra_driver_name	= "atmel-cfb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb_encrypt,
+		.decrypt	= atmel_tdes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(des)",
+	.cra_driver_name	= "atmel-cfb8-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb8_encrypt,
+		.decrypt	= atmel_tdes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(des)",
+	.cra_driver_name	= "atmel-cfb16-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb16_encrypt,
+		.decrypt	= atmel_tdes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(des)",
+	.cra_driver_name	= "atmel-cfb32-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb32_encrypt,
+		.decrypt	= atmel_tdes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(des)",
+	.cra_driver_name	= "atmel-ofb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_ofb_encrypt,
+		.decrypt	= atmel_tdes_ofb_decrypt,
+	}
+},
+{
+	.cra_name		= "ecb(des3_ede)",
+	.cra_driver_name	= "atmel-ecb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2 * DES_KEY_SIZE,
+		.max_keysize	= 3 * DES_KEY_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_ecb_encrypt,
+		.decrypt	= atmel_tdes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des3_ede)",
+	.cra_driver_name	= "atmel-cbc-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cbc_encrypt,
+		.decrypt	= atmel_tdes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(des3_ede)",
+	.cra_driver_name	= "atmel-cfb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb_encrypt,
+		.decrypt	= atmel_tdes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(des3_ede)",
+	.cra_driver_name	= "atmel-cfb8-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb8_encrypt,
+		.decrypt	= atmel_tdes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(des3_ede)",
+	.cra_driver_name	= "atmel-cfb16-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb16_encrypt,
+		.decrypt	= atmel_tdes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(des3_ede)",
+	.cra_driver_name	= "atmel-cfb32-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb32_encrypt,
+		.decrypt	= atmel_tdes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(des3_ede)",
+	.cra_driver_name	= "atmel-ofb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_exit		= atmel_tdes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_ofb_encrypt,
+		.decrypt	= atmel_tdes_ofb_decrypt,
+	}
+},
+};
+
+static void atmel_tdes_queue_task(unsigned long data)
+{
+	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
+
+	atmel_tdes_handle_queue(dd, NULL);
+}
+
+static void atmel_tdes_done_task(unsigned long data)
+{
+	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
+	int err;
+
+	err = atmel_tdes_crypt_dma_stop(dd);
+
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = atmel_tdes_crypt_dma_start(dd);
+		if (!err)
+			return;
+	}
+
+	atmel_tdes_finish_req(dd, err);
+	atmel_tdes_handle_queue(dd, NULL);
+}
+
+static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
+{
+	struct atmel_tdes_dev *tdes_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
+	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
+		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
+		if (TDES_FLAGS_BUSY & tdes_dd->flags)
+			tasklet_schedule(&tdes_dd->done_task);
+		else
+			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
+		crypto_unregister_alg(&tdes_algs[i]);
+}
+
+static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+		INIT_LIST_HEAD(&tdes_algs[i].cra_list);
+		err = crypto_register_alg(&tdes_algs[i]);
+		if (err)
+			goto err_tdes_algs;
+	}
+
+	return 0;
+
+err_tdes_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&tdes_algs[j]);
+
+	return err;
+}
+
+static int __devinit atmel_tdes_probe(struct platform_device *pdev)
+{
+	struct atmel_tdes_dev *tdes_dd;
+	struct device *dev = &pdev->dev;
+	struct resource *tdes_res;
+	unsigned long tdes_phys_size;
+	int err;
+
+	tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+	if (tdes_dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		err = -ENOMEM;
+		goto tdes_dd_err;
+	}
+
+	tdes_dd->dev = dev;
+
+	platform_set_drvdata(pdev, tdes_dd);
+
+	INIT_LIST_HEAD(&tdes_dd->list);
+
+	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
+					(unsigned long)tdes_dd);
+	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
+					(unsigned long)tdes_dd);
+
+	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
+
+	tdes_dd->irq = -1;
+
+	/* Get the base address */
+	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!tdes_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	tdes_dd->phys_base = tdes_res->start;
+	tdes_phys_size = resource_size(tdes_res);
+
+	/* Get the IRQ */
+	tdes_dd->irq = platform_get_irq(pdev,  0);
+	if (tdes_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = tdes_dd->irq;
+		goto res_err;
+	}
+
+	err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED,
+			"atmel-tdes", tdes_dd);
+	if (err) {
+		dev_err(dev, "unable to request tdes irq.\n");
+		goto tdes_irq_err;
+	}
+
+	/* Initializing the clock */
+	tdes_dd->iclk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(tdes_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(tdes_dd->iclk);
+		goto clk_err;
+	}
+
+	tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size);
+	if (!tdes_dd->io_base) {
+		dev_err(dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto tdes_io_err;
+	}
+
+	err = atmel_tdes_dma_init(tdes_dd);
+	if (err)
+		goto err_tdes_dma;
+
+	spin_lock(&atmel_tdes.lock);
+	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
+	spin_unlock(&atmel_tdes.lock);
+
+	err = atmel_tdes_register_algs(tdes_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel DES/TDES\n");
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_tdes.lock);
+	list_del(&tdes_dd->list);
+	spin_unlock(&atmel_tdes.lock);
+	atmel_tdes_dma_cleanup(tdes_dd);
+err_tdes_dma:
+	iounmap(tdes_dd->io_base);
+tdes_io_err:
+	clk_put(tdes_dd->iclk);
+clk_err:
+	free_irq(tdes_dd->irq, tdes_dd);
+tdes_irq_err:
+res_err:
+	tasklet_kill(&tdes_dd->done_task);
+	tasklet_kill(&tdes_dd->queue_task);
+	kfree(tdes_dd);
+	tdes_dd = NULL;
+tdes_dd_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int __devexit atmel_tdes_remove(struct platform_device *pdev)
+{
+	static struct atmel_tdes_dev *tdes_dd;
+
+	tdes_dd = platform_get_drvdata(pdev);
+	if (!tdes_dd)
+		return -ENODEV;
+	spin_lock(&atmel_tdes.lock);
+	list_del(&tdes_dd->list);
+	spin_unlock(&atmel_tdes.lock);
+
+	atmel_tdes_unregister_algs(tdes_dd);
+
+	tasklet_kill(&tdes_dd->done_task);
+	tasklet_kill(&tdes_dd->queue_task);
+
+	atmel_tdes_dma_cleanup(tdes_dd);
+
+	iounmap(tdes_dd->io_base);
+
+	clk_put(tdes_dd->iclk);
+
+	if (tdes_dd->irq >= 0)
+		free_irq(tdes_dd->irq, tdes_dd);
+
+	kfree(tdes_dd);
+	tdes_dd = NULL;
+
+	return 0;
+}
+
+static struct platform_driver atmel_tdes_driver = {
+	.probe		= atmel_tdes_probe,
+	.remove		= __devexit_p(atmel_tdes_remove),
+	.driver		= {
+		.name	= "atmel_tdes",
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(atmel_tdes_driver);
+
+MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 4/5] crypto: add Atmel SHA1/SHA256 driver
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
                   ` (2 preceding siblings ...)
  2012-07-01 17:19 ` [PATCH 3/5] crypto: add Atmel DES/TDES driver Eric Bénard
@ 2012-07-01 17:19 ` Eric Bénard
  2012-07-01 17:19 ` [PATCH 5/5] crypto: add new tests to tcrypt Eric Bénard
  2012-07-11  3:25 ` [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Herbert Xu
  5 siblings, 0 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Nicolas Royer <nicolas@eukrea.com>

Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
---
 drivers/crypto/Kconfig          |   14 +
 drivers/crypto/Makefile         |    1 +
 drivers/crypto/atmel-sha-regs.h |   46 ++
 drivers/crypto/atmel-sha.c      | 1112 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 1173 insertions(+), 0 deletions(-)
 create mode 100644 drivers/crypto/atmel-sha-regs.h
 create mode 100644 drivers/crypto/atmel-sha.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9ac7128..631014b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -358,4 +358,18 @@ config CRYPTO_DEV_ATMEL_TDES
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-tdes.
 
+config CRYPTO_DEV_ATMEL_SHA
+	tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+	depends on ARCH_AT91
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_ALGAPI
+	help
+	  Some Atmel processors have a SHA1/SHA256 hw accelerator.
+	  Select this if you want to use the Atmel module for
+	  SHA1/SHA256 algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-sha.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 211fdc2..387bee1 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
new file mode 100644
index 0000000..dc53a20
--- /dev/null
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -0,0 +1,46 @@
+#ifndef __ATMEL_SHA_REGS_H__
+#define __ATMEL_SHA_REGS_H__
+
+#define SHA_REG_DIGEST(x)		(0x80 + ((x) * 0x04))
+#define SHA_REG_DIN(x)			(0x40 + ((x) * 0x04))
+
+#define SHA_CR				0x00
+#define SHA_CR_START			(1 << 0)
+#define SHA_CR_FIRST			(1 << 4)
+#define SHA_CR_SWRST			(1 << 8)
+
+#define SHA_MR				0x04
+#define SHA_MR_MODE_MASK		(0x3 << 0)
+#define SHA_MR_MODE_MANUAL		0x0
+#define SHA_MR_MODE_AUTO		0x1
+#define SHA_MR_MODE_PDC			0x2
+#define	SHA_MR_DUALBUFF			(1 << 3)
+#define SHA_MR_PROCDLY			(1 << 4)
+#define SHA_MR_ALGO_SHA1		(0 << 8)
+#define SHA_MR_ALGO_SHA256		(1 << 8)
+
+#define SHA_IER				0x10
+#define SHA_IDR				0x14
+#define SHA_IMR				0x18
+#define SHA_ISR				0x1C
+#define SHA_INT_DATARDY			(1 << 0)
+#define SHA_INT_ENDTX			(1 << 1)
+#define SHA_INT_TXBUFE			(1 << 2)
+#define SHA_INT_URAD			(1 << 8)
+#define SHA_ISR_URAT_MASK		(0x7 << 12)
+#define SHA_ISR_URAT_IDR		(0x0 << 12)
+#define SHA_ISR_URAT_ODR		(0x1 << 12)
+#define SHA_ISR_URAT_MR			(0x2 << 12)
+#define SHA_ISR_URAT_WO			(0x5 << 12)
+
+#define SHA_TPR				0x108
+#define SHA_TCR				0x10C
+#define SHA_TNPR			0x118
+#define SHA_TNCR			0x11C
+#define SHA_PTCR			0x120
+#define SHA_PTCR_TXTEN		(1 << 8)
+#define SHA_PTCR_TXTDIS		(1 << 9)
+#define SHA_PTSR			0x124
+#define SHA_PTSR_TXTEN		(1 << 8)
+
+#endif /* __ATMEL_SHA_REGS_H__ */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
new file mode 100644
index 0000000..f938b9d
--- /dev/null
+++ b/drivers/crypto/atmel-sha.c
@@ -0,0 +1,1112 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL SHA1/SHA256 HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-sham.c drivers.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include "atmel-sha-regs.h"
+
+/* SHA flags */
+#define SHA_FLAGS_BUSY			BIT(0)
+#define	SHA_FLAGS_FINAL			BIT(1)
+#define SHA_FLAGS_DMA_ACTIVE	BIT(2)
+#define SHA_FLAGS_OUTPUT_READY	BIT(3)
+#define SHA_FLAGS_INIT			BIT(4)
+#define SHA_FLAGS_CPU			BIT(5)
+#define SHA_FLAGS_DMA_READY		BIT(6)
+
+#define SHA_FLAGS_FINUP		BIT(16)
+#define SHA_FLAGS_SG		BIT(17)
+#define SHA_FLAGS_SHA1		BIT(18)
+#define SHA_FLAGS_SHA256	BIT(19)
+#define SHA_FLAGS_ERROR		BIT(20)
+#define SHA_FLAGS_PAD		BIT(21)
+
+#define SHA_FLAGS_DUALBUFF	BIT(24)
+
+#define SHA_OP_UPDATE	1
+#define SHA_OP_FINAL	2
+
+#define SHA_BUFFER_LEN		PAGE_SIZE
+
+#define ATMEL_SHA_DMA_THRESHOLD		56
+
+
+struct atmel_sha_dev;
+
+struct atmel_sha_reqctx {
+	struct atmel_sha_dev	*dd;
+	unsigned long	flags;
+	unsigned long	op;
+
+	u8	digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+	size_t	digcnt;
+	size_t	bufcnt;
+	size_t	buflen;
+	dma_addr_t	dma_addr;
+
+	/* walk state */
+	struct scatterlist	*sg;
+	unsigned int	offset;	/* offset in current sg */
+	unsigned int	total;	/* total request */
+
+	u8	buffer[0] __aligned(sizeof(u32));
+};
+
+struct atmel_sha_ctx {
+	struct atmel_sha_dev	*dd;
+
+	unsigned long		flags;
+
+	/* fallback stuff */
+	struct crypto_shash	*fallback;
+
+};
+
+#define ATMEL_SHA_QUEUE_LENGTH	1
+
+struct atmel_sha_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	struct device		*dev;
+	struct clk			*iclk;
+	int					irq;
+	void __iomem		*io_base;
+
+	spinlock_t		lock;
+	int			err;
+	struct tasklet_struct	done_task;
+
+	unsigned long		flags;
+	struct crypto_queue	queue;
+	struct ahash_request	*req;
+};
+
+struct atmel_sha_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_sha_drv atmel_sha = {
+	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
+};
+
+static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
+{
+	return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_sha_write(struct atmel_sha_dev *dd,
+					u32 offset, u32 value)
+{
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
+{
+	atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
+
+	if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
+		dd->flags |= SHA_FLAGS_DUALBUFF;
+}
+
+static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
+{
+	size_t count;
+
+	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+		count = min(ctx->sg->length - ctx->offset, ctx->total);
+		count = min(count, ctx->buflen - ctx->bufcnt);
+
+		if (count <= 0)
+			break;
+
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+			ctx->offset, count, 0);
+
+		ctx->bufcnt += count;
+		ctx->offset += count;
+		ctx->total -= count;
+
+		if (ctx->offset == ctx->sg->length) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+			else
+				ctx->total = 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * The purpose of this padding is to ensure that the padded message
+ * is a multiple of 512 bits. The bit "1" is appended at the end of
+ * the message followed by "padlen-1" zero bits. Then a 64-bit block
+ * equal to the message length in bits is appended.
+ *
+ * padlen is calculated as follows:
+ *  - if message length < 56 bytes then padlen = 56 - message length
+ *  - else padlen = 64 + 56 - message length
+ */
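+/*
+ * Worked example of the rule above: a 20-byte message gives index = 20,
+ * so padlen = 56 - 20 = 36 and the padded buffer holds 20 + 36 + 8 = 64
+ * bytes (one 512-bit block); a 60-byte message gives index = 60, so
+ * padlen = (64 + 56) - 60 = 60 and the padded buffer holds
+ * 60 + 60 + 8 = 128 bytes (two blocks).
+ */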
+static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
+{
+	unsigned int index, padlen;
+	u64 bits;
+	u64 size;
+
+	bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
+	size = cpu_to_be64(bits);
+
+	index = ctx->bufcnt & 0x3f;
+	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+	*(ctx->buffer + ctx->bufcnt) = 0x80;
+	memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+	memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
+	ctx->bufcnt += padlen + 8;
+	ctx->flags |= SHA_FLAGS_PAD;
+}
+
+static int atmel_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = NULL;
+	struct atmel_sha_dev *tmp;
+
+	spin_lock_bh(&atmel_sha.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+
+	spin_unlock_bh(&atmel_sha.lock);
+
+	ctx->dd = dd;
+
+	ctx->flags = 0;
+
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+		ctx->flags |= SHA_FLAGS_SHA1;
+	else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+		ctx->flags |= SHA_FLAGS_SHA256;
+
+	ctx->bufcnt = 0;
+	ctx->digcnt = 0;
+	ctx->buflen = SHA_BUFFER_LEN;
+
+	return 0;
+}
+
+static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+
+	if (likely(dma)) {
+		atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+		valmr = SHA_MR_MODE_PDC;
+		if (dd->flags & SHA_FLAGS_DUALBUFF)
+			valmr |= SHA_MR_DUALBUFF;
+	} else {
+		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+	}
+
+	if (ctx->flags & SHA_FLAGS_SHA256)
+		valmr |= SHA_MR_ALGO_SHA256;
+
+	/* Setting CR_FIRST only for the first iteration */
+	if (!ctx->digcnt)
+		valcr = SHA_CR_FIRST;
+
+	atmel_sha_write(dd, SHA_CR, valcr);
+	atmel_sha_write(dd, SHA_MR, valmr);
+}
+
+static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
+			      size_t length, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int count, len32;
+	const u32 *buffer = (const u32 *)buf;
+
+	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length, final);
+
+	atmel_sha_write_ctrl(dd, 0);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt += length;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	dd->flags |= SHA_FLAGS_CPU;
+
+	for (count = 0; count < len32; count++)
+		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int len32;
+
+	dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length1, final);
+
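+	/*
+	 * Program the PDC: the first buffer goes into TPR/TCR and the
+	 * optional second buffer into the "next" registers TNPR/TNCR, so
+	 * both are fed to the SHA engine in a single transfer.
+	 */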
+	len32 = DIV_ROUND_UP(length1, sizeof(u32));
+	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
+	atmel_sha_write(dd, SHA_TPR, dma_addr1);
+	atmel_sha_write(dd, SHA_TCR, len32);
+
+	len32 = DIV_ROUND_UP(length2, sizeof(u32));
+	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
+	atmel_sha_write(dd, SHA_TNCR, len32);
+
+	atmel_sha_write_ctrl(dd, 1);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt += length1;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
+
+	/* Start DMA transfer */
+	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
+
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int bufcnt;
+
+	atmel_sha_append_sg(ctx);
+	atmel_sha_fill_padding(ctx, 0);
+
+	bufcnt = ctx->bufcnt;
+	ctx->bufcnt = 0;
+
+	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
+					struct atmel_sha_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
+				SHA1_BLOCK_SIZE);
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~SHA_FLAGS_SG;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+}
+
+static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int final;
+	size_t count;
+
+	atmel_sha_append_sg(ctx);
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+					 ctx->bufcnt, ctx->digcnt, final);
+
+	if (final)
+		atmel_sha_fill_padding(ctx, 0);
+
+	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
+	}
+
+	return 0;
+}
+
+static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
+	unsigned int count;
+
+	if (!ctx->total)
+		return 0;
+
+	if (ctx->bufcnt || ctx->offset)
+		return atmel_sha_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
+
+	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+		return atmel_sha_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return atmel_sha_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
+			/* not last sg must be SHA1_BLOCK_SIZE aligned */
+			tail = length & (SHA1_BLOCK_SIZE - 1);
+			length -= tail;
+			if (length == 0) {
+				/* offset where to start slow */
+				ctx->offset = length;
+				return atmel_sha_update_dma_slow(dd);
+			}
+		}
+	}
+
+	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	/* Add padding */
+	if (final) {
+		tail = length & (SHA1_BLOCK_SIZE - 1);
+		length -= tail;
+		ctx->total += tail;
+		ctx->offset = length; /* offset where to start slow */
+
+		sg = ctx->sg;
+		atmel_sha_append_sg(ctx);
+
+		atmel_sha_fill_padding(ctx, length);
+
+		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+			ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+			dev_err(dd->dev, "dma %u bytes error\n",
+				ctx->buflen + SHA1_BLOCK_SIZE);
+			return -EINVAL;
+		}
+
+		if (length == 0) {
+			ctx->flags &= ~SHA_FLAGS_SG;
+			count = ctx->bufcnt;
+			ctx->bufcnt = 0;
+			return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+					0, final);
+		} else {
+			ctx->sg = sg;
+			if (!dma_map_sg(dd->dev, ctx->sg, 1,
+				DMA_TO_DEVICE)) {
+					dev_err(dd->dev, "dma_map_sg  error\n");
+					return -EINVAL;
+			}
+
+			ctx->flags |= SHA_FLAGS_SG;
+
+			count = ctx->bufcnt;
+			ctx->bufcnt = 0;
+			return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+					length, ctx->dma_addr, count, final);
+		}
+	}
+
+	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+		dev_err(dd->dev, "dma_map_sg  error\n");
+		return -EINVAL;
+	}
+
+	ctx->flags |= SHA_FLAGS_SG;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+								0, final);
+}
+
+static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+
+	if (ctx->flags & SHA_FLAGS_SG) {
+		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+		if (ctx->flags & SHA_FLAGS_PAD)
+			dma_unmap_single(dd->dev, ctx->dma_addr,
+				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+	} else {
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
+						SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
+static int atmel_sha_update_req(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+		 ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+
+	if (ctx->flags & SHA_FLAGS_CPU)
+		err = atmel_sha_update_cpu(dd);
+	else
+		err = atmel_sha_update_dma_start(dd);
+
+	/* wait for dma completion before we can take more data */
+	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
+			err, ctx->digcnt);
+
+	return err;
+}
+
+static int atmel_sha_final_req(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err = 0;
+	int count;
+
+	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
+		atmel_sha_fill_padding(ctx, 0);
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
+	}
+	/* faster to handle last block with cpu */
+	else {
+		atmel_sha_fill_padding(ctx, 0);
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
+	}
+
+	dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+	return err;
+}
+
+static void atmel_sha_copy_hash(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	if (likely(ctx->flags & SHA_FLAGS_SHA1))
+		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+	else
+		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
+			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+}
+
+static void atmel_sha_copy_ready_hash(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->result)
+		return;
+
+	if (likely(ctx->flags & SHA_FLAGS_SHA1))
+		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+	else
+		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+}
+
+static int atmel_sha_finish(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = ctx->dd;
+	int err = 0;
+
+	if (ctx->digcnt)
+		atmel_sha_copy_ready_hash(req);
+
+	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
+		ctx->bufcnt);
+
+	return err;
+}
+
+static void atmel_sha_finish_req(struct ahash_request *req, int err)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	if (!err) {
+		atmel_sha_copy_hash(req);
+		if (SHA_FLAGS_FINAL & dd->flags)
+			err = atmel_sha_finish(req);
+	} else {
+		ctx->flags |= SHA_FLAGS_ERROR;
+	}
+
+	/* atomic operation is not needed here */
+	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
+			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
+
+	clk_disable_unprepare(dd->iclk);
+
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
+
+	/* handle new request */
+	tasklet_schedule(&dd->done_task);
+}
+
+static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
+{
+	clk_prepare_enable(dd->iclk);
+
+	if (!(SHA_FLAGS_INIT & dd->flags)) {
+		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
+		atmel_sha_dualbuff_test(dd);
+		dd->flags |= SHA_FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
+				  struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct atmel_sha_reqctx *ctx;
+	unsigned long flags;
+	int err = 0, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+
+	if (SHA_FLAGS_BUSY & dd->flags) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= SHA_FLAGS_BUSY;
+
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+	dd->req = req;
+	ctx = ahash_request_ctx(req);
+
+	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+						ctx->op, req->nbytes);
+
+	err = atmel_sha_hw_init(dd);
+
+	if (err)
+		goto err1;
+
+	if (ctx->op == SHA_OP_UPDATE) {
+		err = atmel_sha_update_req(dd);
+		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+			/* no final() after finup() */
+			err = atmel_sha_final_req(dd);
+		}
+	} else if (ctx->op == SHA_OP_FINAL) {
+		err = atmel_sha_final_req(dd);
+	}
+
+err1:
+	if (err != -EINPROGRESS)
+		/* done_task will not finish it, so do it here */
+		atmel_sha_finish_req(req, err);
+
+	dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+	return ret;
+}
+
+static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct atmel_sha_dev *dd = tctx->dd;
+
+	ctx->op = op;
+
+	return atmel_sha_handle_queue(dd, req);
+}
+
+static int atmel_sha_update(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return 0;
+
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->offset = 0;
+
+	if (ctx->flags & SHA_FLAGS_FINUP) {
+		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
+			/* faster to use CPU for short transfers */
+			ctx->flags |= SHA_FLAGS_CPU;
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
+		atmel_sha_append_sg(ctx);
+		return 0;
+	}
+	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
+}
+
+static int atmel_sha_final(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct atmel_sha_dev *dd = tctx->dd;
+
+	int err = 0;
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	if (ctx->flags & SHA_FLAGS_ERROR)
+		return 0; /* uncompleted hash is not needed */
+
+	if (ctx->bufcnt) {
+		return atmel_sha_enqueue(req, SHA_OP_FINAL);
+	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
+		err = atmel_sha_hw_init(dd);
+		if (err)
+			goto err1;
+
+		dd->flags |= SHA_FLAGS_BUSY;
+		err = atmel_sha_final_req(dd);
+	} else {
+		/* copy ready hash (+ finalize hmac) */
+		return atmel_sha_finish(req);
+	}
+
+err1:
+	if (err != -EINPROGRESS)
+		/* done_task will not finish it, so do it here */
+		atmel_sha_finish_req(req, err);
+
+	return err;
+}
+
+static int atmel_sha_finup(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	err1 = atmel_sha_update(req);
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+		return err1;
+
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except when it returned -EINPROGRESS
+	 */
+	err2 = atmel_sha_final(req);
+
+	return err1 ?: err2;
+}
+
+static int atmel_sha_digest(struct ahash_request *req)
+{
+	return atmel_sha_init(req) ?: atmel_sha_finup(req);
+}
+
+static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	/* Allocate a fallback and abort if it failed. */
+	tctx->fallback = crypto_alloc_shash(alg_name, 0,
+					    CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(tctx->fallback)) {
+		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
+				alg_name);
+		return PTR_ERR(tctx->fallback);
+	}
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct atmel_sha_reqctx) +
+				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
+{
+	return atmel_sha_cra_init_alg(tfm, NULL);
+}
+
+static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
+{
+	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(tctx->fallback);
+	tctx->fallback = NULL;
+}
+
+static struct ahash_alg sha_algs[] = {
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.base	= {
+			.cra_name		= "sha1",
+			.cra_driver_name	= "atmel-sha1",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize		= SHA1_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+			.cra_exit		= atmel_sha_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.halg = {
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.base	= {
+			.cra_name		= "sha256",
+			.cra_driver_name	= "atmel-sha256",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize		= SHA256_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+			.cra_exit		= atmel_sha_cra_exit,
+		}
+	}
+},
+};
+
+static void atmel_sha_done_task(unsigned long data)
+{
+	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+	int err = 0;
+
+	if (!(SHA_FLAGS_BUSY & dd->flags)) {
+		atmel_sha_handle_queue(dd, NULL);
+		return;
+	}
+
+	if (SHA_FLAGS_CPU & dd->flags) {
+		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
+			goto finish;
+		}
+	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
+		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
+			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
+			atmel_sha_update_dma_stop(dd);
+			if (dd->err) {
+				err = dd->err;
+				goto finish;
+			}
+		}
+		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+			/* hash or semi-hash ready */
+			dd->flags &= ~(SHA_FLAGS_DMA_READY |
+						SHA_FLAGS_OUTPUT_READY);
+			err = atmel_sha_update_dma_start(dd);
+			if (err != -EINPROGRESS)
+				goto finish;
+		}
+	}
+	return;
+
+finish:
+	/* finish current request */
+	atmel_sha_finish_req(dd->req, err);
+}
+
+static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
+{
+	struct atmel_sha_dev *sha_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_sha_read(sha_dd, SHA_ISR);
+	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
+		atmel_sha_write(sha_dd, SHA_IDR, reg);
+		if (SHA_FLAGS_BUSY & sha_dd->flags) {
+			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
+			if (!(SHA_FLAGS_CPU & sha_dd->flags))
+				sha_dd->flags |= SHA_FLAGS_DMA_READY;
+			tasklet_schedule(&sha_dd->done_task);
+		} else {
+			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
+		}
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
+		crypto_unregister_ahash(&sha_algs[i]);
+}
+
+static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+		err = crypto_register_ahash(&sha_algs[i]);
+		if (err)
+			goto err_sha_algs;
+	}
+
+	return 0;
+
+err_sha_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_algs[j]);
+
+	return err;
+}
+
+static int __devinit atmel_sha_probe(struct platform_device *pdev)
+{
+	struct atmel_sha_dev *sha_dd;
+	struct device *dev = &pdev->dev;
+	struct resource *sha_res;
+	unsigned long sha_phys_size;
+	int err;
+
+	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+	if (sha_dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		err = -ENOMEM;
+		goto sha_dd_err;
+	}
+
+	sha_dd->dev = dev;
+
+	platform_set_drvdata(pdev, sha_dd);
+
+	INIT_LIST_HEAD(&sha_dd->list);
+
+	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
+					(unsigned long)sha_dd);
+
+	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
+
+	sha_dd->irq = -1;
+
+	/* Get the base address */
+	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!sha_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	sha_dd->phys_base = sha_res->start;
+	sha_phys_size = resource_size(sha_res);
+
+	/* Get the IRQ */
+	sha_dd->irq = platform_get_irq(pdev,  0);
+	if (sha_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = sha_dd->irq;
+		goto res_err;
+	}
+
+	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
+						sha_dd);
+	if (err) {
+		dev_err(dev, "unable to request sha irq.\n");
+		goto res_err;
+	}
+
+	/* Initializing the clock */
+	sha_dd->iclk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(sha_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(sha_dd->iclk);
+		goto clk_err;
+	}
+
+	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
+	if (!sha_dd->io_base) {
+		dev_err(dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto sha_io_err;
+	}
+
+	spin_lock(&atmel_sha.lock);
+	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
+	spin_unlock(&atmel_sha.lock);
+
+	err = atmel_sha_register_algs(sha_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel SHA1/SHA256\n");
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&atmel_sha.lock);
+	iounmap(sha_dd->io_base);
+sha_io_err:
+	clk_put(sha_dd->iclk);
+clk_err:
+	free_irq(sha_dd->irq, sha_dd);
+res_err:
+	tasklet_kill(&sha_dd->done_task);
+	kfree(sha_dd);
+	sha_dd = NULL;
+sha_dd_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int __devexit atmel_sha_remove(struct platform_device *pdev)
+{
+	static struct atmel_sha_dev *sha_dd;
+
+	sha_dd = platform_get_drvdata(pdev);
+	if (!sha_dd)
+		return -ENODEV;
+	spin_lock(&atmel_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&atmel_sha.lock);
+
+	atmel_sha_unregister_algs(sha_dd);
+
+	tasklet_kill(&sha_dd->done_task);
+
+	iounmap(sha_dd->io_base);
+
+	clk_put(sha_dd->iclk);
+
+	if (sha_dd->irq >= 0)
+		free_irq(sha_dd->irq, sha_dd);
+
+	kfree(sha_dd);
+	sha_dd = NULL;
+
+	return 0;
+}
+
+static struct platform_driver atmel_sha_driver = {
+	.probe		= atmel_sha_probe,
+	.remove		= __devexit_p(atmel_sha_remove),
+	.driver		= {
+		.name	= "atmel_sha",
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(atmel_sha_driver);
+
+MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukr?a Electromatique");
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 5/5] crypto: add new tests to tcrypt
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
                   ` (3 preceding siblings ...)
  2012-07-01 17:19 ` [PATCH 4/5] crypto: add Atmel SHA1/SHA256 driver Eric Bénard
@ 2012-07-01 17:19 ` Eric Bénard
  2012-07-11  3:25 ` [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Herbert Xu
  5 siblings, 0 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-01 17:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Nicolas Royer <nicolas@eukrea.com>

- set sg buffer size equal to the message size
- add cfb & ofb tests for AES, DES & TDES

Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
---
 crypto/tcrypt.c |   50 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 45 insertions(+), 5 deletions(-)
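
For reference, the new cfb/ofb cases are added inside the existing acipher
speed modes, so no new tcrypt mode numbers are introduced; assuming the
usual tcrypt invocation, they are exercised with something like
"modprobe tcrypt mode=500 sec=1" for AES (mode=501 for des3_ede and
mode=502 for des).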

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 8f147bf..a9296bd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -809,7 +809,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
 			       struct cipher_speed_template *template,
 			       unsigned int tcount, u8 *keysize)
 {
-	unsigned int ret, i, j, iv_len;
+	unsigned int ret, i, j, k, iv_len;
 	struct tcrypt_result tresult;
 	const char *key;
 	char iv[128];
@@ -883,11 +883,23 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
 			}
 
 			sg_init_table(sg, TVMEMSIZE);
-			sg_set_buf(sg, tvmem[0] + *keysize,
+
+			k = *keysize + *b_size;
+			if (k > PAGE_SIZE) {
+				sg_set_buf(sg, tvmem[0] + *keysize,
 				   PAGE_SIZE - *keysize);
-			for (j = 1; j < TVMEMSIZE; j++) {
-				sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
-				memset(tvmem[j], 0xff, PAGE_SIZE);
+				k -= PAGE_SIZE;
+				j = 1;
+				while (k > PAGE_SIZE) {
+					sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
+					memset(tvmem[j], 0xff, PAGE_SIZE);
+					j++;
+					k -= PAGE_SIZE;
+				}
+				sg_set_buf(sg + j, tvmem[j], k);
+				memset(tvmem[j], 0xff, k);
+			} else {
+				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
 			}
 
 			iv_len = crypto_ablkcipher_ivsize(tfm);
@@ -1512,6 +1524,14 @@ static int do_test(int m)
 				   speed_template_16_24_32);
 		test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
 				   speed_template_16_24_32);
+		test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
 		break;
 
 	case 501:
@@ -1527,6 +1547,18 @@ static int do_test(int m)
 		test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
 				   des3_speed_template, DES3_SPEED_VECTORS,
 				   speed_template_24);
+		test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
 		break;
 
 	case 502:
@@ -1538,6 +1570,14 @@ static int do_test(int m)
 				   speed_template_8);
 		test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
 				   speed_template_8);
+		test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
+				   speed_template_8);
 		break;
 
 	case 503:
-- 
1.7.7.6

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/5] crypto: add Atmel AES driver
  2012-07-01 17:19 ` [PATCH 2/5] crypto: add Atmel AES driver Eric Bénard
@ 2012-07-06 12:17   ` Jean-Christophe PLAGNIOL-VILLARD
  2012-07-06 13:25     ` Eric Bénard
  0 siblings, 1 reply; 9+ messages in thread
From: Jean-Christophe PLAGNIOL-VILLARD @ 2012-07-06 12:17 UTC (permalink / raw)
  To: linux-arm-kernel

On 19:19 Sun 01 Jul, Eric Bénard wrote:
> From: Nicolas Royer <nicolas@eukrea.com>
> 
> Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
> Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
> Acked-by: Eric Bénard <eric@eukrea.com>
> Tested-by: Eric Bénard <eric@eukrea.com>
> ---
>  drivers/crypto/Kconfig          |   17 +
>  drivers/crypto/Makefile         |    3 +-
>  drivers/crypto/atmel-aes-regs.h |   62 ++
>  drivers/crypto/atmel-aes.c      | 1206 +++++++++++++++++++++++++++++++++++++++
>  4 files changed, 1287 insertions(+), 1 deletions(-)
>  create mode 100644 drivers/crypto/atmel-aes-regs.h
>  create mode 100644 drivers/crypto/atmel-aes.c
> 
> diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
> index 1092a77..1be94e5 100644
> --- a/drivers/crypto/Kconfig
> +++ b/drivers/crypto/Kconfig
> @@ -325,4 +325,21 @@ if CRYPTO_DEV_UX500
>  	source "drivers/crypto/ux500/Kconfig"
>  endif # if CRYPTO_DEV_UX500
>  
> +config CRYPTO_DEV_ATMEL_AES
> +	tristate "Support for Atmel AES hw accelerator"
> +	depends on ARCH_AT91
> +	select CRYPTO_CBC
> +	select CRYPTO_ECB
> +	select CRYPTO_AES
> +	select CRYPTO_ALGAPI
> +	select CRYPTO_BLKCIPHER
> +	select CONFIG_AT_HDMAC
> +	help
> +	  Some Atmel processors have AES hw accelerator.
> +	  Select this if you want to use the Atmel module for
> +	  AES algorithms.
> +
> +	  To compile this driver as a module, choose M here: the module
> +	  will be called atmel-aes.
> +
>  endif # CRYPTO_HW
> diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
> index 0139032..7d17b67 100644
> --- a/drivers/crypto/Makefile
> +++ b/drivers/crypto/Makefile
> @@ -14,4 +14,5 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
>  obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
>  obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
>  obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
> -obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
> \ No newline at end of file
> +obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
> +obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
> diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
> new file mode 100644
> index 0000000..2786bb1
> --- /dev/null
> +++ b/drivers/crypto/atmel-aes-regs.h
> @@ -0,0 +1,62 @@
> +#ifndef __ATMEL_AES_REGS_H__
> +#define __ATMEL_AES_REGS_H__
> +
> +#define AES_CR			0x00
> +#define AES_CR_START		(1 << 0)
> +#define AES_CR_SWRST		(1 << 8)
> +#define AES_CR_LOADSEED		(1 << 16)
> +
> +#define	AES_MR			0x04
> +#define AES_MR_CYPHER_DEC		(0 << 0)
> +#define AES_MR_CYPHER_ENC		(1 << 0)
> +#define	AES_MR_DUALBUFF			(1 << 3)
> +#define AES_MR_PROCDLY_MASK		(0xF << 4)
> +#define AES_MR_PROCDLY_OFFSET	4
> +#define AES_MR_SMOD_MASK		(0x3 << 8)
> +#define AES_MR_SMOD_MANUAL		(0x0 << 8)
> +#define AES_MR_SMOD_AUTO		(0x1 << 8)
> +#define AES_MR_SMOD_IDATAR0		(0x2 << 8)
> +#define	AES_MR_KEYSIZE_MASK		(0x3 << 10)
> +#define	AES_MR_KEYSIZE_128		(0x0 << 10)
> +#define	AES_MR_KEYSIZE_192		(0x1 << 10)
> +#define	AES_MR_KEYSIZE_256		(0x2 << 10)
> +#define AES_MR_OPMOD_MASK		(0x7 << 12)
> +#define AES_MR_OPMOD_ECB		(0x0 << 12)
> +#define AES_MR_OPMOD_CBC		(0x1 << 12)
> +#define AES_MR_OPMOD_OFB		(0x2 << 12)
> +#define AES_MR_OPMOD_CFB		(0x3 << 12)
> +#define AES_MR_OPMOD_CTR		(0x4 << 12)
> +#define AES_MR_LOD				(0x1 << 15)
> +#define AES_MR_CFBS_MASK		(0x7 << 16)
> +#define AES_MR_CFBS_128b		(0x0 << 16)
> +#define AES_MR_CFBS_64b			(0x1 << 16)
> +#define AES_MR_CFBS_32b			(0x2 << 16)
> +#define AES_MR_CFBS_16b			(0x3 << 16)
> +#define AES_MR_CFBS_8b			(0x4 << 16)
> +#define AES_MR_CKEY_MASK		(0xF << 20)
> +#define AES_MR_CKEY_OFFSET		20
> +#define AES_MR_CMTYP_MASK		(0x1F << 24)
> +#define AES_MR_CMTYP_OFFSET		24
> +
> +#define	AES_IER		0x10
> +#define	AES_IDR		0x14
> +#define	AES_IMR		0x18
> +#define	AES_ISR		0x1C
> +#define AES_INT_DATARDY		(1 << 0)
> +#define AES_INT_URAD		(1 << 8)
> +#define AES_ISR_URAT_MASK	(0xF << 12)
> +#define AES_ISR_URAT_IDR_WR_PROC	(0x0 << 12)
> +#define AES_ISR_URAT_ODR_RD_PROC	(0x1 << 12)
> +#define AES_ISR_URAT_MR_WR_PROC		(0x2 << 12)
> +#define AES_ISR_URAT_ODR_RD_SUBK	(0x3 << 12)
> +#define AES_ISR_URAT_MR_WR_SUBK		(0x4 << 12)
> +#define AES_ISR_URAT_WOR_RD			(0x5 << 12)
> +
> +#define AES_KEYWR(x)	(0x20 + ((x) * 0x04))
> +#define AES_IDATAR(x)	(0x40 + ((x) * 0x04))
> +#define AES_ODATAR(x)	(0x50 + ((x) * 0x04))
> +#define AES_IVR(x)		(0x60 + ((x) * 0x04))
> +
> +#define AES_HW_VERSION	0xFC
> +
> +#endif /* __ATMEL_AES_REGS_H__ */
> diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
> new file mode 100644
> index 0000000..6bb20ff
> --- /dev/null
> +++ b/drivers/crypto/atmel-aes.c
> @@ -0,0 +1,1206 @@
> +/*
> + * Cryptographic API.
> + *
> + * Support for ATMEL AES HW acceleration.
> + *
> + * Copyright (c) 2012 Eukréa Electromatique - ATMEL
> + * Author: Nicolas Royer <nicolas@eukrea.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as published
> + * by the Free Software Foundation.
> + *
> + * Some ideas are from omap-aes.c driver.
> + */
> +
> +
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/err.h>
> +#include <linux/clk.h>
> +#include <linux/io.h>
> +#include <linux/hw_random.h>
> +#include <linux/platform_device.h>
> +
> +#include <linux/device.h>
> +#include <linux/module.h>
> +#include <linux/init.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/kernel.h>
> +#include <linux/clk.h>
> +#include <linux/irq.h>
> +#include <linux/io.h>
> +#include <linux/platform_device.h>
> +#include <linux/scatterlist.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/delay.h>
> +#include <linux/crypto.h>
> +#include <linux/cryptohash.h>
> +#include <crypto/scatterwalk.h>
> +#include <crypto/algapi.h>
> +#include <crypto/aes.h>
> +#include <crypto/hash.h>
> +#include <crypto/internal/hash.h>
> +#include <linux/platform_data/atmel-aes.h>
> +#include "atmel-aes-regs.h"
> +
> +#define CFB8_BLOCK_SIZE		1
> +#define CFB16_BLOCK_SIZE	2
> +#define CFB32_BLOCK_SIZE	4
> +#define CFB64_BLOCK_SIZE	8
> +
> +/* AES flags */
> +#define AES_FLAGS_MODE_MASK	0x01ff
> +#define AES_FLAGS_ENCRYPT	BIT(0)
> +#define AES_FLAGS_CBC		BIT(1)
> +#define AES_FLAGS_CFB		BIT(2)
> +#define AES_FLAGS_CFB8		BIT(3)
> +#define AES_FLAGS_CFB16		BIT(4)
> +#define AES_FLAGS_CFB32		BIT(5)
> +#define AES_FLAGS_CFB64		BIT(6)
> +#define AES_FLAGS_OFB		BIT(7)
> +#define AES_FLAGS_CTR		BIT(8)
> +
> +#define AES_FLAGS_INIT		BIT(16)
> +#define AES_FLAGS_DMA		BIT(17)
> +#define AES_FLAGS_BUSY		BIT(18)
> +
> +#define AES_FLAGS_DUALBUFF	BIT(24)
> +
> +#define ATMEL_AES_QUEUE_LENGTH	1
> +#define ATMEL_AES_CACHE_SIZE	0
> +
> +#define ATMEL_AES_DMA_THRESHOLD		16
> +
> +
> +struct atmel_aes_dev;
> +
> +struct atmel_aes_ctx {
> +	struct atmel_aes_dev *dd;
> +
> +	int		keylen;
> +	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
> +};
> +
> +struct atmel_aes_reqctx {
> +	unsigned long mode;
> +};
> +
> +struct atmel_aes_dma {
> +	struct dma_chan			*chan;
> +	struct dma_slave_config dma_conf;
> +};
> +
> +struct atmel_aes_dev {
> +	struct list_head	list;
> +	unsigned long		phys_base;
> +	void __iomem		*io_base;
> +
> +	struct atmel_aes_ctx	*ctx;
> +	struct device		*dev;
> +	struct clk		*iclk;
> +	int	irq;
> +
> +	unsigned long		flags;
> +	int	err;
> +
> +	spinlock_t		lock;
> +	struct crypto_queue	queue;
> +
> +	struct tasklet_struct	done_task;
> +	struct tasklet_struct	queue_task;
> +
> +	struct ablkcipher_request	*req;
> +	size_t	total;
> +
> +	struct scatterlist	*in_sg;
> +	unsigned int		nb_in_sg;
> +
> +	struct scatterlist	*out_sg;
> +	unsigned int		nb_out_sg;
> +
> +	size_t	bufcnt;
> +
> +	u8	buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
can you allocate it
> +	int	dma_in;
> +	struct atmel_aes_dma	dma_lch_in;
> +
> +	u8	buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
ditto

otherwise looks good

Best Regards,
J.
> +	int	dma_out;
> +	struct atmel_aes_dma	dma_lch_out;
> +
> +	u32	hw_version;
> +};
> +
> +struct atmel_aes_drv {
> +	struct list_head	dev_list;
> +	spinlock_t		lock;
> +};
> +
> +static struct atmel_aes_drv atmel_aes = {
> +	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
> +	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
> +};
> +
> +static int atmel_aes_sg_length(struct ablkcipher_request *req,
> +			struct scatterlist *sg)
> +{
> +	unsigned int total = req->nbytes;
> +	int sg_nb;
> +	unsigned int len;
> +	struct scatterlist *sg_list;
> +
> +	sg_nb = 0;
> +	sg_list = sg;
> +	total = req->nbytes;
> +
> +	while (total) {
> +		len = min(sg_list->length, total);
> +
> +		sg_nb++;
> +		total -= len;
> +
> +		sg_list = sg_next(sg_list);
> +		if (!sg_list)
> +			total = 0;
> +	}
> +
> +	return sg_nb;
> +}
> +
> +static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
> +{
> +	return readl_relaxed(dd->io_base + offset);
> +}
> +
> +static inline void atmel_aes_write(struct atmel_aes_dev *dd,
> +					u32 offset, u32 value)
> +{
> +	writel_relaxed(value, dd->io_base + offset);
> +}
> +
> +static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
> +					u32 *value, int count)
> +{
> +	for (; count--; value++, offset += 4)
> +		*value = atmel_aes_read(dd, offset);
> +}
> +
> +static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
> +					u32 *value, int count)
> +{
> +	for (; count--; value++, offset += 4)
> +		atmel_aes_write(dd, offset, *value);
> +}
> +
> +static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
> +{
> +	atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
> +
> +	if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
> +		dd->flags |= AES_FLAGS_DUALBUFF;
> +}
> +
> +static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
> +{
> +	struct atmel_aes_dev *aes_dd = NULL;
> +	struct atmel_aes_dev *tmp;
> +
> +	spin_lock_bh(&atmel_aes.lock);
> +	if (!ctx->dd) {
> +		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
> +			aes_dd = tmp;
> +			break;
> +		}
> +		ctx->dd = aes_dd;
> +	} else {
> +		aes_dd = ctx->dd;
> +	}
> +
> +	spin_unlock_bh(&atmel_aes.lock);
> +
> +	return aes_dd;
> +}
> +
> +static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
> +{
> +	clk_prepare_enable(dd->iclk);
> +
> +	if (!(dd->flags & AES_FLAGS_INIT)) {
		return directly
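		i.e. presumably an early return along these lines (a sketch of the
		suggestion, not code from the submitted patch):

			if (dd->flags & AES_FLAGS_INIT)
				return 0;

			atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
			atmel_aes_dualbuff_test(dd);
			dd->flags |= AES_FLAGS_INIT;
			dd->err = 0;

			return 0;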
> +		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
> +		atmel_aes_dualbuff_test(dd);
> +		dd->flags |= AES_FLAGS_INIT;
> +		dd->err = 0;
> +	}
> +
> +	return 0;
> +}
> +
> +static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
> +{
> +	atmel_aes_hw_init(dd);
> +
> +	dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
> +
> +	clk_disable_unprepare(dd->iclk);
> +}
> +
> +static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
> +{
> +	struct ablkcipher_request *req = dd->req;
> +
> +	clk_disable_unprepare(dd->iclk);
> +	dd->flags &= ~AES_FLAGS_BUSY;
> +
> +	req->base.complete(&req->base, err);
> +}
> +
> +static void atmel_aes_dma_callback(void *data)
> +{
> +	struct atmel_aes_dev *dd = data;
> +
> +	/* dma_lch_out - completed */
> +	tasklet_schedule(&dd->done_task);
> +}
> +
> +static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
> +{
> +	struct dma_async_tx_descriptor	*in_desc, *out_desc;
> +	int nb_dma_sg_in, nb_dma_sg_out;
> +
> +	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
> +	if (!dd->nb_in_sg)
> +		goto exit_err;
> +
> +	nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
> +			DMA_TO_DEVICE);
> +	if (!nb_dma_sg_in)
> +		goto exit_err;
> +
> +	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
> +				nb_dma_sg_in, DMA_MEM_TO_DEV,
> +				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
> +
> +	if (!in_desc)
> +		goto unmap_in;
> +
> +	/* callback not needed */
> +
> +	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
> +	if (!dd->nb_out_sg)
> +		goto unmap_in;
> +
> +	nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
> +			DMA_FROM_DEVICE);
> +	if (!nb_dma_sg_out)
> +		goto unmap_in;
> +
> +	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
> +				nb_dma_sg_out, DMA_DEV_TO_MEM,
> +				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> +
> +	if (!out_desc)
> +		goto unmap_out;
> +
> +	out_desc->callback = atmel_aes_dma_callback;
> +	out_desc->callback_param = dd;
> +
> +	dd->total -= dd->req->nbytes;
> +
> +	dmaengine_submit(out_desc);
> +	dma_async_issue_pending(dd->dma_lch_out.chan);
> +
> +	dmaengine_submit(in_desc);
> +	dma_async_issue_pending(dd->dma_lch_in.chan);
> +
> +	return 0;
> +
> +unmap_out:
> +	dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
> +		DMA_FROM_DEVICE);
> +unmap_in:
> +	dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
> +		DMA_TO_DEVICE);
> +exit_err:
> +	return -EINVAL;
> +}
> +
> +static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
> +{
> +	dd->flags &= ~AES_FLAGS_DMA;
> +
> +	/* use cache buffers */
> +	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
> +	if (!dd->nb_in_sg)
> +		return -EINVAL;
> +
> +	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
> +	if (!dd->nb_out_sg)
> +		return -EINVAL;
> +
> +	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
> +					dd->buf_in, dd->total);
> +
> +	if (!dd->bufcnt)
> +		return -EINVAL;
> +
> +	dd->total -= dd->bufcnt;
> +
> +	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
> +	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
> +				dd->bufcnt >> 2);
> +
> +	return 0;
> +}
> +
> +static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
> +{
> +	int err;
> +
> +	if (dd->flags & AES_FLAGS_CFB8) {
> +		dd->dma_lch_in.dma_conf.dst_addr_width =
> +			DMA_SLAVE_BUSWIDTH_1_BYTE;
> +		dd->dma_lch_out.dma_conf.src_addr_width =
> +			DMA_SLAVE_BUSWIDTH_1_BYTE;
> +	} else if (dd->flags & AES_FLAGS_CFB16) {
> +		dd->dma_lch_in.dma_conf.dst_addr_width =
> +			DMA_SLAVE_BUSWIDTH_2_BYTES;
> +		dd->dma_lch_out.dma_conf.src_addr_width =
> +			DMA_SLAVE_BUSWIDTH_2_BYTES;
> +	} else {
> +		dd->dma_lch_in.dma_conf.dst_addr_width =
> +			DMA_SLAVE_BUSWIDTH_4_BYTES;
> +		dd->dma_lch_out.dma_conf.src_addr_width =
> +			DMA_SLAVE_BUSWIDTH_4_BYTES;
> +	}
> +
> +	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
> +	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
> +
> +	dd->flags |= AES_FLAGS_DMA;
> +	err = atmel_aes_crypt_dma(dd);
> +
> +	return err;
> +}
> +
> +static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
> +{
> +	int err;
> +	u32 valcr = 0, valmr = 0;
> +
> +	err = atmel_aes_hw_init(dd);
> +
> +	if (err)
> +		return err;
> +
> +	/* MR register must be set before IV registers */
> +	if (dd->ctx->keylen == AES_KEYSIZE_128)
> +		valmr |= AES_MR_KEYSIZE_128;
> +	else if (dd->ctx->keylen == AES_KEYSIZE_192)
> +		valmr |= AES_MR_KEYSIZE_192;
> +	else
> +		valmr |= AES_MR_KEYSIZE_256;
> +
> +	if (dd->flags & AES_FLAGS_CBC) {
> +		valmr |= AES_MR_OPMOD_CBC;
> +	} else if (dd->flags & AES_FLAGS_CFB) {
> +		valmr |= AES_MR_OPMOD_CFB;
> +		if (dd->flags & AES_FLAGS_CFB8)
> +			valmr |= AES_MR_CFBS_8b;
> +		else if (dd->flags & AES_FLAGS_CFB16)
> +			valmr |= AES_MR_CFBS_16b;
> +		else if (dd->flags & AES_FLAGS_CFB32)
> +			valmr |= AES_MR_CFBS_32b;
> +		else if (dd->flags & AES_FLAGS_CFB64)
> +			valmr |= AES_MR_CFBS_64b;
> +	} else if (dd->flags & AES_FLAGS_OFB) {
> +		valmr |= AES_MR_OPMOD_OFB;
> +	} else if (dd->flags & AES_FLAGS_CTR) {
> +		valmr |= AES_MR_OPMOD_CTR;
> +	} else {
> +		valmr |= AES_MR_OPMOD_ECB;
> +	}
> +
> +	if (dd->flags & AES_FLAGS_ENCRYPT)
> +		valmr |= AES_MR_CYPHER_ENC;
> +
> +	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
> +		valmr |= AES_MR_SMOD_IDATAR0;
> +		if (dd->flags & AES_FLAGS_DUALBUFF)
> +			valmr |= AES_MR_DUALBUFF;
> +	} else {
> +		valmr |= AES_MR_SMOD_AUTO;
> +	}
> +
> +	atmel_aes_write(dd, AES_CR, valcr);
> +	atmel_aes_write(dd, AES_MR, valmr);
> +
> +	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
> +						dd->ctx->keylen >> 2);
> +
> +	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
> +	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
> +	   dd->req->info) {
> +		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
> +	}
> +
> +	return 0;
> +}
> +
> +static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
> +			       struct ablkcipher_request *req)
> +{
> +	struct crypto_async_request *async_req, *backlog;
> +	struct atmel_aes_ctx *ctx;
> +	struct atmel_aes_reqctx *rctx;
> +	unsigned long flags;
> +	int err, ret = 0;
> +
> +	spin_lock_irqsave(&dd->lock, flags);
> +	if (req)
> +		ret = ablkcipher_enqueue_request(&dd->queue, req);
> +	if (dd->flags & AES_FLAGS_BUSY) {
> +		spin_unlock_irqrestore(&dd->lock, flags);
> +		return ret;
> +	}
> +	backlog = crypto_get_backlog(&dd->queue);
> +	async_req = crypto_dequeue_request(&dd->queue);
> +	if (async_req)
> +		dd->flags |= AES_FLAGS_BUSY;
> +	spin_unlock_irqrestore(&dd->lock, flags);
> +
> +	if (!async_req)
> +		return ret;
> +
> +	if (backlog)
> +		backlog->complete(backlog, -EINPROGRESS);
> +
> +	req = ablkcipher_request_cast(async_req);
> +
> +	/* assign new request to device */
> +	dd->req = req;
> +	dd->total = req->nbytes;
> +	dd->in_sg = req->src;
> +	dd->out_sg = req->dst;
> +
> +	rctx = ablkcipher_request_ctx(req);
> +	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
> +	rctx->mode &= AES_FLAGS_MODE_MASK;
> +	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
> +	dd->ctx = ctx;
> +	ctx->dd = dd;
> +
> +	err = atmel_aes_write_ctrl(dd);
> +	if (!err) {
> +		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
> +			err = atmel_aes_crypt_dma_start(dd);
> +		else
> +			err = atmel_aes_crypt_cpu_start(dd);
> +	}
> +	if (err) {
> +		/* aes_task will not finish it, so do it here */
> +		atmel_aes_finish_req(dd, err);
> +		tasklet_schedule(&dd->queue_task);
> +	}
> +
> +	return ret;
> +}
> +
> +static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
> +{
> +	int err = -EINVAL;
> +
> +	if (dd->flags & AES_FLAGS_DMA) {
> +		dma_unmap_sg(dd->dev, dd->out_sg,
> +			dd->nb_out_sg, DMA_FROM_DEVICE);
> +		dma_unmap_sg(dd->dev, dd->in_sg,
> +			dd->nb_in_sg, DMA_TO_DEVICE);
> +		err = 0;
> +	}
> +
> +	return err;
> +}
> +
> +static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
> +{
> +	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
> +			crypto_ablkcipher_reqtfm(req));
> +	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
> +	struct atmel_aes_dev *dd;
> +
> +	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
> +		pr_err("request size is not an exact number of AES blocks\n");
> +		return -EINVAL;
> +	}
> +
> +	dd = atmel_aes_find_dev(ctx);
> +	if (!dd)
> +		return -ENODEV;
> +
> +	rctx->mode = mode;
> +
> +	return atmel_aes_handle_queue(dd, req);
> +}
> +
> +static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
> +{
> +	struct at_dma_slave	*sl = slave;
> +
> +	if (sl && sl->dma_dev == chan->device->dev) {
> +		chan->private = sl;
> +		return true;
> +	} else {
> +		return false;
> +	}
> +}
> +
> +static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
> +{
> +	int err = -ENOMEM;
> +	struct aes_platform_data	*pdata;
> +	dma_cap_mask_t mask_in, mask_out;
> +
> +	pdata = dd->dev->platform_data;
> +
> +	if (pdata && pdata->dma_slave->txdata.dma_dev &&
> +		pdata->dma_slave->rxdata.dma_dev) {
> +
> +		/* Try to grab 2 DMA channels */
> +		dma_cap_zero(mask_in);
> +		dma_cap_set(DMA_SLAVE, mask_in);
> +
> +		dd->dma_lch_in.chan = dma_request_channel(mask_in,
> +				atmel_aes_filter, &pdata->dma_slave->rxdata);
> +		if (!dd->dma_lch_in.chan)
> +			goto err_dma_in;
> +
> +		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
> +		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
> +			AES_IDATAR(0);
> +		dd->dma_lch_in.dma_conf.src_maxburst = 1;
> +		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
> +		dd->dma_lch_in.dma_conf.device_fc = false;
> +
> +		dma_cap_zero(mask_out);
> +		dma_cap_set(DMA_SLAVE, mask_out);
> +		dd->dma_lch_out.chan = dma_request_channel(mask_out,
> +				atmel_aes_filter, &pdata->dma_slave->txdata);
> +		if (!dd->dma_lch_out.chan)
> +			goto err_dma_out;
> +
> +		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
> +		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
> +			AES_ODATAR(0);
> +		dd->dma_lch_out.dma_conf.src_maxburst = 1;
> +		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
> +		dd->dma_lch_out.dma_conf.device_fc = false;
> +
> +		return 0;
> +	} else {
> +		return -ENODEV;
> +	}
> +
> +err_dma_out:
> +	dma_release_channel(dd->dma_lch_in.chan);
> +err_dma_in:
> +	return err;
> +}
> +
> +static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
> +{
> +	dma_release_channel(dd->dma_lch_in.chan);
> +	dma_release_channel(dd->dma_lch_out.chan);
> +}
> +
> +static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
> +			   unsigned int keylen)
> +{
> +	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
> +
> +	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
> +		   keylen != AES_KEYSIZE_256) {
> +		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
> +		return -EINVAL;
> +	}
> +
> +	memcpy(ctx->key, key, keylen);
> +	ctx->keylen = keylen;
> +
> +	return 0;
> +}
> +
> +static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT);
> +}
> +
> +static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		0);
> +}
> +
> +static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
> +}
> +
> +static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CBC);
> +}
> +
> +static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
> +}
> +
> +static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_OFB);
> +}
> +
> +static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
> +}
> +
> +static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CFB);
> +}
> +
> +static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
> +}
> +
> +static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CFB | AES_FLAGS_CFB64);
> +}
> +
> +static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
> +}
> +
> +static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CFB | AES_FLAGS_CFB32);
> +}
> +
> +static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
> +}
> +
> +static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CFB | AES_FLAGS_CFB16);
> +}
> +
> +static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT |	AES_FLAGS_CFB | AES_FLAGS_CFB8);
> +}
> +
> +static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CFB | AES_FLAGS_CFB8);
> +}
> +
> +static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
> +}
> +
> +static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
> +{
> +	return atmel_aes_crypt(req,
> +		AES_FLAGS_CTR);
> +}
> +
> +static int atmel_aes_cra_init(struct crypto_tfm *tfm)
> +{
> +	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
> +
> +	return 0;
> +}
> +
> +static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
> +{
> +}
> +
> +static struct crypto_alg aes_algs[] = {
> +{
> +	.cra_name		= "ecb(aes)",
> +	.cra_driver_name	= "atmel-ecb-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= AES_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_ecb_encrypt,
> +		.decrypt	= atmel_aes_ecb_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "cbc(aes)",
> +	.cra_driver_name	= "atmel-cbc-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= AES_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cbc_encrypt,
> +		.decrypt	= atmel_aes_cbc_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "ofb(aes)",
> +	.cra_driver_name	= "atmel-ofb-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= AES_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_ofb_encrypt,
> +		.decrypt	= atmel_aes_ofb_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "cfb(aes)",
> +	.cra_driver_name	= "atmel-cfb-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= AES_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cfb_encrypt,
> +		.decrypt	= atmel_aes_cfb_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "cfb32(aes)",
> +	.cra_driver_name	= "atmel-cfb32-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= CFB32_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cfb32_encrypt,
> +		.decrypt	= atmel_aes_cfb32_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "cfb16(aes)",
> +	.cra_driver_name	= "atmel-cfb16-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= CFB16_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cfb16_encrypt,
> +		.decrypt	= atmel_aes_cfb16_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "cfb8(aes)",
> +	.cra_driver_name	= "atmel-cfb8-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= CFB8_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cfb8_encrypt,
> +		.decrypt	= atmel_aes_cfb8_decrypt,
> +	}
> +},
> +{
> +	.cra_name		= "ctr(aes)",
> +	.cra_driver_name	= "atmel-ctr-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= AES_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_ctr_encrypt,
> +		.decrypt	= atmel_aes_ctr_decrypt,
> +	}
> +},
> +};
> +
> +static struct crypto_alg aes_cfb64_alg[] = {
> +{
> +	.cra_name		= "cfb64(aes)",
> +	.cra_driver_name	= "atmel-cfb64-aes",
> +	.cra_priority		= 100,
> +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
> +	.cra_blocksize		= CFB64_BLOCK_SIZE,
> +	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
> +	.cra_alignmask		= 0x0,
> +	.cra_type		= &crypto_ablkcipher_type,
> +	.cra_module		= THIS_MODULE,
> +	.cra_init		= atmel_aes_cra_init,
> +	.cra_exit		= atmel_aes_cra_exit,
> +	.cra_u.ablkcipher = {
> +		.min_keysize	= AES_MIN_KEY_SIZE,
> +		.max_keysize	= AES_MAX_KEY_SIZE,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.setkey		= atmel_aes_setkey,
> +		.encrypt	= atmel_aes_cfb64_encrypt,
> +		.decrypt	= atmel_aes_cfb64_decrypt,
> +	}
> +},
> +};
> +
> +static void atmel_aes_queue_task(unsigned long data)
> +{
> +	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
> +
> +	atmel_aes_handle_queue(dd, NULL);
> +}
> +
> +static void atmel_aes_done_task(unsigned long data)
> +{
> +	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
> +	int err;
> +
> +	if (!(dd->flags & AES_FLAGS_DMA)) {
> +		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
> +				dd->bufcnt >> 2);
> +
> +		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
> +			dd->buf_out, dd->bufcnt))
> +			err = 0;
> +		else
> +			err = -EINVAL;
> +
> +		goto cpu_end;
> +	}
> +
> +	err = atmel_aes_crypt_dma_stop(dd);
> +
> +	err = dd->err ? : err;
> +
> +	if (dd->total && !err) {
> +		err = atmel_aes_crypt_dma_start(dd);
> +		if (!err)
> +			return; /* DMA started. Not finishing. */
> +	}
> +
> +cpu_end:
> +	atmel_aes_finish_req(dd, err);
> +	atmel_aes_handle_queue(dd, NULL);
> +}
> +
> +static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
> +{
> +	struct atmel_aes_dev *aes_dd = dev_id;
> +	u32 reg;
> +
> +	reg = atmel_aes_read(aes_dd, AES_ISR);
> +	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
> +		atmel_aes_write(aes_dd, AES_IDR, reg);
> +		if (AES_FLAGS_BUSY & aes_dd->flags)
> +			tasklet_schedule(&aes_dd->done_task);
> +		else
> +			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
> +		return IRQ_HANDLED;
> +	}
> +
> +	return IRQ_NONE;
> +}
> +
> +static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
> +		crypto_unregister_alg(&aes_algs[i]);
> +	if (dd->hw_version >= 0x130)
> +		crypto_unregister_alg(&aes_cfb64_alg[0]);
> +}
> +
> +static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
> +{
> +	int err, i, j;
> +
> +	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
> +		INIT_LIST_HEAD(&aes_algs[i].cra_list);
> +		err = crypto_register_alg(&aes_algs[i]);
> +		if (err)
> +			goto err_aes_algs;
> +	}
> +
> +	atmel_aes_hw_version_init(dd);
> +
> +	if (dd->hw_version >= 0x130) {
> +		INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
> +		err = crypto_register_alg(&aes_cfb64_alg[0]);
> +		if (err)
> +			goto err_aes_cfb64_alg;
> +	}
> +
> +	return 0;
> +
> +err_aes_cfb64_alg:
> +	i = ARRAY_SIZE(aes_algs);
> +err_aes_algs:
> +	for (j = 0; j < i; j++)
> +		crypto_unregister_alg(&aes_algs[j]);
> +
> +	return err;
> +}
> +
> +static int __devinit atmel_aes_probe(struct platform_device *pdev)
> +{
> +	struct atmel_aes_dev *aes_dd;
> +	struct aes_platform_data	*pdata;
> +	struct device *dev = &pdev->dev;
> +	struct resource *aes_res;
> +	unsigned long aes_phys_size;
> +	int err;
> +
> +	pdata = pdev->dev.platform_data;
> +	if (!pdata) {
> +		err = -ENXIO;
> +		goto aes_dd_err;
> +	}
> +
> +	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
> +	if (aes_dd == NULL) {
> +		dev_err(dev, "unable to alloc data struct.\n");
> +		err = -ENOMEM;
> +		goto aes_dd_err;
> +	}
> +
> +	aes_dd->dev = dev;
> +
> +	platform_set_drvdata(pdev, aes_dd);
> +
> +	INIT_LIST_HEAD(&aes_dd->list);
> +
> +	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
> +					(unsigned long)aes_dd);
> +	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
> +					(unsigned long)aes_dd);
> +
> +	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
> +
> +	aes_dd->irq = -1;
> +
> +	/* Get the base address */
> +	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	if (!aes_res) {
> +		dev_err(dev, "no MEM resource info\n");
> +		err = -ENODEV;
> +		goto res_err;
> +	}
> +	aes_dd->phys_base = aes_res->start;
> +	aes_phys_size = resource_size(aes_res);
> +
> +	/* Get the IRQ */
> +	aes_dd->irq = platform_get_irq(pdev,  0);
> +	if (aes_dd->irq < 0) {
> +		dev_err(dev, "no IRQ resource info\n");
> +		err = aes_dd->irq;
> +		goto aes_irq_err;
> +	}
> +
> +	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
> +						aes_dd);
> +	if (err) {
> +		dev_err(dev, "unable to request aes irq.\n");
> +		goto aes_irq_err;
> +	}
> +
> +	/* Initializing the clock */
> +	aes_dd->iclk = clk_get(&pdev->dev, NULL);
> +	if (IS_ERR(aes_dd->iclk)) {
> +		dev_err(dev, "clock initialization failed.\n");
> +		err = PTR_ERR(aes_dd->iclk);
> +		goto clk_err;
> +	}
> +
> +	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
> +	if (!aes_dd->io_base) {
> +		dev_err(dev, "can't ioremap\n");
> +		err = -ENOMEM;
> +		goto aes_io_err;
> +	}
> +
> +	err = atmel_aes_dma_init(aes_dd);
> +	if (err)
> +		goto err_aes_dma;
> +
> +	spin_lock(&atmel_aes.lock);
> +	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
> +	spin_unlock(&atmel_aes.lock);
> +
> +	err = atmel_aes_register_algs(aes_dd);
> +	if (err)
> +		goto err_algs;
> +
> +	dev_info(dev, "Atmel AES\n");
> +
> +	return 0;
> +
> +err_algs:
> +	spin_lock(&atmel_aes.lock);
> +	list_del(&aes_dd->list);
> +	spin_unlock(&atmel_aes.lock);
> +	atmel_aes_dma_cleanup(aes_dd);
> +err_aes_dma:
> +	iounmap(aes_dd->io_base);
> +aes_io_err:
> +	clk_put(aes_dd->iclk);
> +clk_err:
> +	free_irq(aes_dd->irq, aes_dd);
> +aes_irq_err:
> +res_err:
> +	tasklet_kill(&aes_dd->done_task);
> +	tasklet_kill(&aes_dd->queue_task);
> +	kfree(aes_dd);
> +	aes_dd = NULL;
> +aes_dd_err:
> +	dev_err(dev, "initialization failed.\n");
> +
> +	return err;
> +}
> +
> +static int __devexit atmel_aes_remove(struct platform_device *pdev)
> +{
> +	struct atmel_aes_dev *aes_dd;
> +
> +	aes_dd = platform_get_drvdata(pdev);
> +	if (!aes_dd)
> +		return -ENODEV;
> +	spin_lock(&atmel_aes.lock);
> +	list_del(&aes_dd->list);
> +	spin_unlock(&atmel_aes.lock);
> +
> +	atmel_aes_unregister_algs(aes_dd);
> +
> +	tasklet_kill(&aes_dd->done_task);
> +	tasklet_kill(&aes_dd->queue_task);
> +
> +	atmel_aes_dma_cleanup(aes_dd);
> +
> +	iounmap(aes_dd->io_base);
> +
> +	clk_put(aes_dd->iclk);
> +
> +	if (aes_dd->irq > 0)
> +		free_irq(aes_dd->irq, aes_dd);
> +
> +	kfree(aes_dd);
> +	aes_dd = NULL;
> +
> +	return 0;
> +}
> +
> +static struct platform_driver atmel_aes_driver = {
> +	.probe		= atmel_aes_probe,
> +	.remove		= __devexit_p(atmel_aes_remove),
> +	.driver		= {
> +		.name	= "atmel_aes",
> +		.owner	= THIS_MODULE,
> +	},
> +};
> +
> +module_platform_driver(atmel_aes_driver);
> +
> +MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
> +MODULE_LICENSE("GPL v2");
> +MODULE_AUTHOR("Nicolas Royer - Eukr?a Electromatique");
> -- 
> 1.7.7.6
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 2/5] crypto: add Atmel AES driver
  2012-07-06 12:17   ` Jean-Christophe PLAGNIOL-VILLARD
@ 2012-07-06 13:25     ` Eric Bénard
  0 siblings, 0 replies; 9+ messages in thread
From: Eric Bénard @ 2012-07-06 13:25 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Jean-Christophe,

On Fri, 6 Jul 2012 14:17:19 +0200,
Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> wrote:
> > +	u8	buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
> can you allocate it
> > +	int	dma_in;
> > +	struct atmel_aes_dma	dma_lch_in;
> > +
> > +	u8	buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
> ditto
> 
Why is it preferred to allocate these buffers dynamically (we are
talking about 2 x 16 byte buffers)?
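
For reference, a minimal sketch of the kind of change being asked about,
assuming a plain kzalloc in probe with buf_in/buf_out turned into pointers
(this is an assumption about the reviewer's intent, not code from the
submitted patches):

	/* hypothetical: buf_in/buf_out declared as u8 * in atmel_aes_dev */
	aes_dd->buf_in = kzalloc(ATMEL_AES_DMA_THRESHOLD, GFP_KERNEL);
	aes_dd->buf_out = kzalloc(ATMEL_AES_DMA_THRESHOLD, GFP_KERNEL);
	if (!aes_dd->buf_in || !aes_dd->buf_out) {
		err = -ENOMEM;
		goto err_aes_buf;	/* hypothetical error label */
	}

with matching kfree() calls in the error path and in atmel_aes_remove().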

> otherwise looks good
> 
thanks

Eric

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers
  2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
                   ` (4 preceding siblings ...)
  2012-07-01 17:19 ` [PATCH 5/5] crypto: add new tests to tcrypt Eric Bénard
@ 2012-07-11  3:25 ` Herbert Xu
  5 siblings, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2012-07-11  3:25 UTC (permalink / raw)
  To: linux-arm-kernel

On Sun, Jul 01, 2012 at 07:19:42PM +0200, Eric Bénard wrote:
> This patch series adds support for the crypto engine available in
> the Atmel SAM9G46 http://www.atmel.com/devices/SAM9G46.aspx and SAM9M11
> http://www.atmel.com/devices/SAM9M11.aspx
> 
> For each driver the choice between PDC/DMA or PIO was optimized to
> maximize performance (benchmarks will soon be available on
> the linux4sam wiki http://www.at91.com/).
> 
> These drivers were tested on both devices: the last patch adds a few
> missing tests to tcrypt.
> 
> This work was sponsored by Atmel.

All applied.  Thanks!
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2012-07-11  3:25 UTC | newest]

Thread overview: 9+ messages
2012-07-01 17:19 [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Eric Bénard
2012-07-01 17:19 ` [PATCH 1/5] ARM: AT91SAM9G45: add crypto peripherals Eric Bénard
2012-07-01 17:19 ` [PATCH 2/5] crypto: add Atmel AES driver Eric Bénard
2012-07-06 12:17   ` Jean-Christophe PLAGNIOL-VILLARD
2012-07-06 13:25     ` Eric Bénard
2012-07-01 17:19 ` [PATCH 3/5] crypto: add Atmel DES/TDES driver Eric Bénard
2012-07-01 17:19 ` [PATCH 4/5] crypto: add Atmel SHA1/SHA256 driver Eric Bénard
2012-07-01 17:19 ` [PATCH 5/5] crypto: add new tests to tcrypt Eric Bénard
2012-07-11  3:25 ` [PATCH 0/5] AT91SAM9G46/M11: add crypto drivers Herbert Xu
