* [PATCH RFC 1/4] crypto: qat - Fix 64 bytes requests
2014-11-06 19:59 [PATCH RFC 0/4] crypto: qat - Add userspace access to QAT services Tadeusz Struk
@ 2014-11-06 19:59 ` Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 2/4] crypto: qat - Add PKE firmware Tadeusz Struk
` (2 subsequent siblings)
3 siblings, 0 replies; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-06 19:59 UTC (permalink / raw)
To: herbert; +Cc: davem, linux-crypto, qat-linux
Fix invalid inflights calculation for 64-byte PKE requests that will be sent
from userspace.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
.../qat/qat_common/adf_transport_access_macros.h | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 91d88d6..160c9a3 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -83,14 +83,14 @@
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-/* Valid internal msg size values internal */
+/* Valid internal msg size values */
#define ADF_MSG_SIZE_32 0x01
#define ADF_MSG_SIZE_64 0x02
#define ADF_MSG_SIZE_128 0x04
#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-/* Size to bytes conversion macros for ring and msg values */
+/* Size to bytes conversion macros for ring and msg size values */
#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
@@ -100,8 +100,11 @@
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
- ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
#define BUILD_RING_CONFIG(size) \
((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH RFC 2/4] crypto: qat - Add PKE firmware
2014-11-06 19:59 [PATCH RFC 0/4] crypto: qat - Add userspace access to QAT services Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 1/4] crypto: qat - Fix 64 bytes requests Tadeusz Struk
@ 2014-11-06 19:59 ` Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 3/4] crypto: qat - Add userspace instances Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace Tadeusz Struk
3 siblings, 0 replies; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-06 19:59 UTC (permalink / raw)
To: herbert; +Cc: davem, linux-crypto, qat-linux
Add PKE firmware header file and code that loads the PKE firmware.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
drivers/crypto/qat/qat_common/adf_accel_devices.h | 4 +
drivers/crypto/qat/qat_common/adf_accel_engine.c | 44 ++++++++-
drivers/crypto/qat/qat_common/adf_ctl_drv.c | 1
drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 99 +++++++++++++++++++++
4 files changed, 144 insertions(+), 4 deletions(-)
create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2ed4256..9586e5e 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -182,6 +182,10 @@ struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
struct icp_qat_fw_loader_handle *fw_loader;
const struct firmware *uof_fw;
+ const struct firmware *mmp_fw;
+ char *mmp_vaddr;
+ dma_addr_t mmp_addr;
+ size_t mmp_fw_size;
};
struct adf_accel_dev {
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index c77453b..e9e131b 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -51,6 +51,8 @@
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
+#define ADF_QAT_MMP_FW "qat_mmp.bin"
+
int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
@@ -63,20 +65,51 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
return -EFAULT;
}
-
+ if (request_firmware(&loader_data->mmp_fw, ADF_QAT_MMP_FW,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ pr_err("QAT: Failed to load firmware %s\n", ADF_QAT_MMP_FW);
+ goto err_uof;
+ }
uof_size = loader_data->uof_fw->size;
uof_addr = (void *)loader_data->uof_fw->data;
+ loader_data->mmp_vaddr = kmalloc_node(loader_data->mmp_fw->size,
+ GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!loader_data->mmp_vaddr)
+ goto err_mmp;
+
+ loader_data->mmp_fw_size = loader_data->mmp_fw->size;
+ memcpy(loader_data->mmp_vaddr, loader_data->mmp_fw->data,
+ loader_data->mmp_fw_size);
+ loader_data->mmp_addr = dma_map_single(&GET_DEV(accel_dev),
+ (void *)loader_data->mmp_vaddr,
+ loader_data->mmp_fw->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&GET_DEV(accel_dev), loader_data->mmp_addr)) {
+ pr_err("QAT: Failed to dmamap MMP fw\n");
+ goto err_free;
+ }
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
pr_err("QAT: Failed to map UOF\n");
- goto out_err;
+ goto err_dma_map;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
pr_err("QAT: Failed to map UOF\n");
- goto out_err;
+ goto uclo_del_obj;
}
+ release_firmware(loader_data->mmp_fw);
return 0;
-out_err:
+uclo_del_obj:
+ qat_uclo_del_uof_obj(loader_data->fw_loader);
+err_dma_map:
+ dma_unmap_single(&GET_DEV(accel_dev), loader_data->mmp_addr,
+ loader_data->mmp_fw->size, DMA_TO_DEVICE);
+err_free:
+ kfree(loader_data->mmp_vaddr);
+err_mmp:
+ release_firmware(loader_data->mmp_fw);
+err_uof:
release_firmware(loader_data->uof_fw);
return -EFAULT;
}
@@ -85,6 +118,9 @@ int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ dma_unmap_single(&GET_DEV(accel_dev), loader_data->mmp_addr,
+ loader_data->mmp_fw_size, DMA_TO_DEVICE);
+ kfree(loader_data->mmp_vaddr);
release_firmware(loader_data->uof_fw);
qat_uclo_del_uof_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 244d733..a660539 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -486,5 +486,6 @@ module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE("qat_mmp.bin");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000..4b8910f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,99 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ uint64_t content_desc_addr;
+ uint32_t content_desc_resrvd;
+ uint32_t func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+ uint8_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t comn_req_flags;
+ uint16_t resrvd4;
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ uint8_t output_param_count;
+ uint8_t input_param_count;
+ uint16_t output_param_size;
+ uint32_t input_param_size;
+ uint64_t next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+ uint8_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ uint16_t comn_resp_flags;
+ uint16_t resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+};
+#endif
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH RFC 3/4] crypto: qat - Add userspace instances
2014-11-06 19:59 [PATCH RFC 0/4] crypto: qat - Add userspace access to QAT services Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 1/4] crypto: qat - Fix 64 bytes requests Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 2/4] crypto: qat - Add PKE firmware Tadeusz Struk
@ 2014-11-06 19:59 ` Tadeusz Struk
2014-11-06 19:59 ` [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace Tadeusz Struk
3 siblings, 0 replies; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-06 19:59 UTC (permalink / raw)
To: herbert; +Cc: davem, linux-crypto, qat-linux
Add code that creates and manages userspace crypto instances.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
drivers/crypto/qat/qat_common/adf_accel_devices.h | 15 +
drivers/crypto/qat/qat_common/adf_cfg_strings.h | 1
drivers/crypto/qat/qat_common/adf_common_drv.h | 21 ++
drivers/crypto/qat/qat_common/qat_crypto.c | 189 ++++++++++------
drivers/crypto/qat/qat_common/qat_crypto_user.c | 251 +++++++++++++++++++++
drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 86 -------
6 files changed, 417 insertions(+), 146 deletions(-)
create mode 100644 drivers/crypto/qat/qat_common/qat_crypto_user.c
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 9586e5e..a6d0300 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -176,6 +176,18 @@ struct adf_hw_device_data {
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
+#define GET_INST(accel_dev) (&accel_dev->crypto_list)
+#ifdef CONFIG_CRYPTO_DEV_QAT_USERSPACE
+#define GET_USER_INST(accel_dev) (&accel_dev->crypto_user_list)
+#else
+static LIST_HEAD(empty);
+static inline struct list_head *__get_empty_list(void)
+{
+ return &empty;
+}
+
+#define GET_USER_INST(accel_dev) __get_empty_list()
+#endif
struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
@@ -195,6 +207,9 @@ struct adf_accel_dev {
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
struct list_head crypto_list;
+#ifdef CONFIG_CRYPTO_DEV_QAT_USERSPACE
+ struct list_head crypto_user_list;
+#endif
unsigned long status;
atomic_t ref_count;
struct dentry *debugfs_dir;
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index c7ac758..eab645e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -49,6 +49,7 @@
#define ADF_GENERAL_SEC "GENERAL"
#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_USER_SEC "USER"
#define ADF_ACCEL_SEC "Accelerator"
#define ADF_NUM_CY "NumberCyInstances"
#define ADF_NUM_DC "NumberDcInstances"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5e8f9d4..9a00a07 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -131,7 +131,9 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev);
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
int qat_crypto_register(void);
int qat_crypto_unregister(void);
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+int qat_crypto_configure_instances(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_kernel_instance(void);
+struct qat_crypto_instance *qat_crypto_get_user_instance(void);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
int qat_algs_init(void);
@@ -189,4 +191,21 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
+#ifdef CONFIG_CRYPTO_DEV_QAT_USERSPACE
+int qat_crypto_configure_user_instances(struct adf_accel_dev *accel_dev);
+int qat_crypto_create_user_instances(struct adf_accel_dev *accel_dev);
+void qat_crypto_free_user_instances(struct adf_accel_dev *accel_dev);
+#else
+static inline int qat_crypto_configure_user_instances(struct adf_accel_dev *dev)
+{
+ return 0;
+}
+
+static inline int qat_crypto_create_user_instances(struct adf_accel_dev *dev)
+{
+ return 0;
+}
+
+#define qat_crypto_free_user_instances(dev) do {} while (0)
+#endif
#endif
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 828f2a6..17a0a0b 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -53,6 +53,7 @@
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"
+#include "adf_transport_access_macros.h"
#define SEC ADF_KERNEL_SEC
@@ -64,13 +65,14 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst)
adf_dev_put(inst->accel_dev);
}
-static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+static void qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
struct qat_crypto_instance *inst;
struct list_head *list_ptr, *tmp;
- int i;
list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+ int i;
+
inst = list_entry(list_ptr, struct qat_crypto_instance, list);
for (i = 0; i < atomic_read(&inst->refctr); i++)
@@ -82,47 +84,60 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
if (inst->sym_rx)
adf_remove_ring(inst->sym_rx);
- if (inst->pke_tx)
- adf_remove_ring(inst->pke_tx);
-
- if (inst->pke_rx)
- adf_remove_ring(inst->pke_rx);
-
- if (inst->rnd_tx)
- adf_remove_ring(inst->rnd_tx);
-
- if (inst->rnd_rx)
- adf_remove_ring(inst->rnd_rx);
-
list_del(list_ptr);
kfree(inst);
}
- return 0;
}
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+static inline int get_current_node(void)
+{
+ return cpu_data(current_thread_info()->cpu).phys_proc_id;
+}
+
+static struct adf_accel_dev *qat_crypto_get_dev_node(void)
{
struct adf_accel_dev *accel_dev = NULL;
- struct qat_crypto_instance *inst_best = NULL;
struct list_head *itr;
+ int node = get_current_node();
unsigned long best = ~0;
list_for_each(itr, adf_devmgr_get_head()) {
- accel_dev = list_entry(itr, struct adf_accel_dev, list);
- if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
- dev_to_node(&GET_DEV(accel_dev)) < 0)
- && adf_dev_started(accel_dev))
- break;
- accel_dev = NULL;
+ struct adf_accel_dev *accel_dev_curr =
+ list_entry(itr, struct adf_accel_dev, list);
+ unsigned long cur;
+
+ if ((node == dev_to_node(&GET_DEV(accel_dev_curr)) ||
+ dev_to_node(&GET_DEV(accel_dev_curr)) < 0) &&
+ adf_dev_started(accel_dev_curr)) {
+ cur = atomic_read(&accel_dev_curr->ref_count);
+ if (best > cur) {
+ accel_dev = accel_dev_curr;
+ best = cur;
+ }
+ }
}
if (!accel_dev) {
- pr_err("QAT: Could not find device on node %d\n", node);
+ pr_err("QAT: Could not find a device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
}
if (!accel_dev || !adf_dev_started(accel_dev))
return NULL;
- list_for_each(itr, &accel_dev->crypto_list) {
+ return accel_dev;
+}
+
+static struct qat_crypto_instance *qat_crypto_get_instance(bool user)
+{
+ struct adf_accel_dev *accel_dev = qat_crypto_get_dev_node();
+ struct qat_crypto_instance *inst_best = NULL;
+ struct list_head *list, *itr;
+ unsigned long best = ~0;
+
+ if (!accel_dev)
+ return NULL;
+
+ list = user ? GET_USER_INST(accel_dev) : GET_INST(accel_dev);
+ list_for_each(itr, list) {
struct qat_crypto_instance *inst;
unsigned long cur;
@@ -137,7 +152,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
if (atomic_add_return(1, &inst_best->refctr) == 1) {
if (adf_dev_get(accel_dev)) {
atomic_dec(&inst_best->refctr);
- pr_err("QAT: Could increment dev refctr\n");
+ pr_err("QAT: Couldn't increment inst refctr\n");
return NULL;
}
}
@@ -145,15 +160,22 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
return inst_best;
}
+struct qat_crypto_instance *qat_crypto_get_kernel_instance()
+{
+ return qat_crypto_get_instance(false);
+}
+
+struct qat_crypto_instance *qat_crypto_get_user_instance()
+{
+ return qat_crypto_get_instance(true);
+}
+
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
- int i;
- unsigned long bank;
- unsigned long num_inst, num_msg_sym, num_msg_asym;
- int msg_size;
- struct qat_crypto_instance *inst;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ unsigned long num_inst;
+ int i;
INIT_LIST_HEAD(&accel_dev->crypto_list);
strlcpy(key, ADF_NUM_CY, sizeof(key));
@@ -165,8 +187,11 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
return -EFAULT;
for (i = 0; i < num_inst; i++) {
- inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
- dev_to_node(&GET_DEV(accel_dev)));
+ unsigned long bank, num_msg_sym;
+ int msg_size;
+ struct qat_crypto_instance *inst =
+ kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!inst)
goto err;
@@ -187,67 +212,103 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
if (kstrtoul(val, 10, &num_msg_sym))
goto err;
num_msg_sym = num_msg_sym >> 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
- goto err;
-
- if (kstrtoul(val, 10, &num_msg_asym))
- goto err;
- num_msg_asym = num_msg_asym >> 1;
-
msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
msg_size, key, NULL, 0, &inst->sym_tx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, NULL, 0, &inst->rnd_tx))
- goto err;
-
- msg_size = msg_size >> 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, NULL, 0, &inst->pke_tx))
- goto err;
-
msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
msg_size, key, qat_alg_callback, 0,
&inst->sym_rx))
goto err;
+ }
+ return 0;
+err:
+ qat_crypto_free_instances(accel_dev);
+ return -ENOMEM;
+}
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
- &inst->rnd_rx))
+/**
+ * qat_crypto_configure_instances() - Add instances configuration
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Function adds the configuration required to create crypto instances.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_configure_instances(struct adf_accel_dev *accel_dev)
+{
+ int cpus = num_online_cpus();
+ int banks = GET_MAX_BANKS(accel_dev);
+ int i, instances = min(cpus, banks);
+ unsigned long val;
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ goto err;
+
+ for (i = 0; i < instances; i++) {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+
+ val = i;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
- &inst->pke_rx))
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
goto err;
}
+ val = i;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
return 0;
err:
- qat_crypto_free_instances(accel_dev);
- return -ENOMEM;
+ return -EFAULT;
}
+EXPORT_SYMBOL_GPL(qat_crypto_configure_instances);
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
if (qat_crypto_create_instances(accel_dev))
return -EFAULT;
-
+ if (qat_crypto_create_user_instances(accel_dev)) {
+ qat_crypto_free_instances(accel_dev);
+ return -EFAULT;
+ }
return 0;
}
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
- return qat_crypto_free_instances(accel_dev);
+ qat_crypto_free_instances(accel_dev);
+ qat_crypto_free_user_instances(accel_dev);
+ return 0;
}
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/qat_crypto_user.c b/drivers/crypto/qat/qat_common/qat_crypto_user.c
new file mode 100644
index 0000000..536702a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto_user.c
@@ -0,0 +1,251 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+#include "adf_transport_access_macros.h"
+
+#define SECUSR ADF_USER_SEC
+
+void qat_crypto_free_user_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_crypto_instance *inst;
+ struct list_head *list_ptr, *tmp;
+
+ list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_user_list) {
+ int i;
+
+ inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_crypto_put_instance(inst);
+
+ if (inst->sym_tx)
+ adf_remove_ring(inst->sym_tx);
+
+ if (inst->sym_rx)
+ adf_remove_ring(inst->sym_rx);
+
+ if (inst->pke_tx)
+ adf_remove_ring(inst->pke_tx);
+
+ if (inst->pke_rx)
+ adf_remove_ring(inst->pke_rx);
+
+ list_del(list_ptr);
+ kfree(inst);
+ }
+}
+
+int qat_crypto_create_user_instances(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ unsigned long num_inst;
+ int i;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_user_list);
+ strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+ if (adf_cfg_get_param_value(accel_dev, SECUSR, key, val))
+ return -EFAULT;
+
+ if (kstrtoul(val, 0, &num_inst))
+ return -EFAULT;
+
+ for (i = 0; i < num_inst; i++) {
+ unsigned long bank, num_msg_sym, num_msg_asym;
+ int msg_size;
+ struct qat_crypto_instance *inst =
+ kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!inst)
+ goto err;
+
+ list_add_tail(&inst->list, &accel_dev->crypto_user_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_get_param_value(accel_dev, SECUSR, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &bank))
+ goto err;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SECUSR, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_sym))
+ goto err;
+ num_msg_sym = num_msg_sym >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ if (adf_cfg_get_param_value(accel_dev, SECUSR, key, val))
+ goto err;
+
+ if (kstrtoul(val, 10, &num_msg_asym))
+ goto err;
+ num_msg_asym = num_msg_asym >> 1;
+
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_create_ring(accel_dev, SECUSR, bank, num_msg_sym,
+ msg_size, key, NULL, 0, &inst->sym_tx))
+ goto err;
+
+ msg_size = msg_size >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ if (adf_create_ring(accel_dev, SECUSR, bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->pke_tx))
+ goto err;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_create_ring(accel_dev, SECUSR, bank, num_msg_sym,
+ msg_size, key, qat_user_callback, 0,
+ &inst->sym_rx))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ if (adf_create_ring(accel_dev, SECUSR, bank, num_msg_asym,
+ msg_size, key, qat_user_asym_callback, 0,
+ &inst->pke_rx))
+ goto err;
+ }
+ return 0;
+err:
+ qat_crypto_free_user_instances(accel_dev);
+ return -ENOMEM;
+}
+
+/**
+ * qat_crypto_configure_user_instances() - Add userspace instances configuration
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Function adds the configuration required to create user space instances.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_configure_user_instances(struct adf_accel_dev *accel_dev)
+{
+ int cpus = num_online_cpus();
+ int banks = GET_MAX_BANKS(accel_dev);
+ int i, instances = min(cpus, banks);
+ unsigned long val;
+
+ if (adf_cfg_section_add(accel_dev, ADF_USER_SEC))
+ goto err;
+
+ for (i = 0; i < instances; i++) {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+
+ val = (i + instances) % banks;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ (i + instances) % banks);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 3;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 9;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 11;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+ }
+
+ val = i;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_USER_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
+ return 0;
+err:
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(qat_crypto_configure_user_instances);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 948f66b..8df453a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -60,7 +60,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
-#include <adf_transport_access_macros.h>
#include "adf_dh895xcc_hw_data.h"
#include "adf_drv.h"
@@ -121,88 +120,13 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
static int qat_dev_start(struct adf_accel_dev *accel_dev)
{
- int cpus = num_online_cpus();
- int banks = GET_MAX_BANKS(accel_dev);
- int instances = min(cpus, banks);
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int i;
- unsigned long val;
-
- if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
- goto err;
if (adf_cfg_section_add(accel_dev, "Accelerator0"))
goto err;
- for (i = 0; i < instances; i++) {
- val = i;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
- i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- val = 128;
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 2;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 4;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 8;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 10;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = 12;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, (void *)&val, ADF_DEC))
- goto err;
- }
- val = i;
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- ADF_NUM_CY, (void *)&val, ADF_DEC))
+ if (qat_crypto_configure_instances(accel_dev))
+ goto err;
+
+ if (qat_crypto_configure_user_instances(accel_dev))
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
@@ -239,7 +163,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
- dev_to_node(&pdev->dev));
+ dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-06 19:59 [PATCH RFC 0/4] crypto: qat - Add userspace access to QAT services Tadeusz Struk
` (2 preceding siblings ...)
2014-11-06 19:59 ` [PATCH RFC 3/4] crypto: qat - Add userspace instances Tadeusz Struk
@ 2014-11-06 19:59 ` Tadeusz Struk
2014-11-07 2:56 ` Herbert Xu
3 siblings, 1 reply; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-06 19:59 UTC (permalink / raw)
To: herbert; +Cc: davem, linux-crypto, qat-linux
Add a new algif socket interface that gives userspace access to symmetric
and asymmetric crypto.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
drivers/crypto/qat/Kconfig | 9
drivers/crypto/qat/qat_common/Makefile | 3
drivers/crypto/qat/qat_common/adf_common_drv.h | 18 +
drivers/crypto/qat/qat_common/adf_ctl_drv.c | 12
drivers/crypto/qat/qat_common/algif_qat.c | 532 ++++++++++++++++
drivers/crypto/qat/qat_common/algif_qat_asym.c | 791 ++++++++++++++++++++++++
drivers/crypto/qat/qat_common/qat_algs.c | 22 -
drivers/crypto/qat/qat_common/qat_bufs.h | 65 ++
8 files changed, 1432 insertions(+), 20 deletions(-)
create mode 100644 drivers/crypto/qat/qat_common/algif_qat.c
create mode 100644 drivers/crypto/qat/qat_common/algif_qat_asym.c
create mode 100644 drivers/crypto/qat/qat_common/qat_bufs.h
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 49bede2..f0827f1 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -21,3 +21,12 @@ config CRYPTO_DEV_QAT_DH895xCC
To compile this as a module, choose M here: the module
will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_USERSPACE
+ bool "Support for userspace access to Intel(R) QAT (EXPERIMENTAL)"
+ depends on CRYPTO_DEV_QAT && CRYPTO_USER_API
+ default n
+ help
+ Support for userspace access to Intel(R) QuickAssist Technology
+ acceleration.
+ If unsure, say N.
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index e0424dc..ead901d 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -12,3 +12,6 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_CRYPTO_DEV_QAT_USERSPACE) += algif_qat.o \
+ algif_qat_asym.o \
+ qat_crypto_user.o
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 9a00a07..73bd78b 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -136,6 +136,8 @@ struct qat_crypto_instance *qat_crypto_get_kernel_instance(void);
struct qat_crypto_instance *qat_crypto_get_user_instance(void);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
+void qat_user_callback(void *resp);
+void qat_user_asym_callback(void *resp);
int qat_algs_init(void);
void qat_algs_exit(void);
int qat_algs_register(void);
@@ -192,10 +194,26 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
#ifdef CONFIG_CRYPTO_DEV_QAT_USERSPACE
+int algif_qat_init(void);
+void algif_qat_exit(void);
+int algif_qat_asym_init(void);
+void algif_qat_asym_exit(void);
int qat_crypto_configure_user_instances(struct adf_accel_dev *accel_dev);
int qat_crypto_create_user_instances(struct adf_accel_dev *accel_dev);
void qat_crypto_free_user_instances(struct adf_accel_dev *accel_dev);
#else
+static inline int algif_qat_init(void)
+{
+ return 0;
+}
+
+#define algif_qat_exit() do {} while (0)
+static inline int algif_qat_asym_init(void)
+{
+ return 0;
+}
+
+#define algif_qat_asym_exit() do {} while (0)
static inline int qat_crypto_configure_user_instances(struct adf_accel_dev *dev)
{
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index a660539..464e50f 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -460,8 +460,18 @@ static int __init adf_register_ctl_device_driver(void)
if (qat_crypto_register())
goto err_crypto_register;
+ if (algif_qat_init())
+ goto err_algif;
+
+ if (algif_qat_asym_init())
+ goto err_all;
+
return 0;
+err_all:
+ algif_qat_exit();
+err_algif:
+ qat_crypto_unregister();
err_crypto_register:
adf_exit_aer();
err_aer:
@@ -479,6 +489,8 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_aer();
qat_crypto_unregister();
qat_algs_exit();
+ algif_qat_exit();
+ algif_qat_asym_exit();
mutex_destroy(&adf_ctl_lock);
}
diff --git a/drivers/crypto/qat/qat_common/algif_qat.c b/drivers/crypto/qat/qat_common/algif_qat.c
new file mode 100644
index 0000000..53c0669
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/algif_qat.c
@@ -0,0 +1,532 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <crypto/if_alg.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <net/sock.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_bufs.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+static unsigned int pool_id;
+
+#define SYM_BUF_NUM 16
+#define BUF_REQ_SIZE (sizeof(struct qat_alg_buf_list) + \
+ (SYM_BUF_NUM * sizeof(struct qat_alg_buf)))
+
+struct qat_algif_cy_ctx;
+
+struct qat_algif_request {
+ struct qat_alg_buf_list bufl;
+ struct qat_alg_buf buf_arr[SYM_BUF_NUM];
+ dma_addr_t src_paddr;
+ size_t data_len;
+ struct qat_algif_cy_ctx *ctx;
+ struct icp_qat_fw_la_resp resp;
+ atomic_t done;
+ struct list_head list;
+} __packed __aligned(64);
+
+struct qat_algif_cy_ctx {
+ struct crypto_ctx {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+ } crypto_ctx;
+ dma_addr_t ctx_paddr;
+ struct icp_qat_fw_la_bulk_req req;
+ struct qat_crypto_instance *inst;
+ struct qat_algif_request *current_req;
+ struct kmem_cache *cache;
+ mempool_t *pool;
+ struct list_head queue;
+ atomic_t data_available;
+} __aligned(64);
+
+static void qat_cache_constructor(void *v)
+{
+ memset(v, '\0', sizeof(struct qat_algif_request));
+}
+
+static void qat_mempool_free(void *_req, void *pool_data)
+{
+ struct qat_algif_cy_ctx *ctx = pool_data;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct kmem_cache *cache = ctx->cache;
+ struct qat_algif_request *req = _req;
+
+ dma_unmap_single(&GET_DEV(accel_dev), req->src_paddr,
+ BUF_REQ_SIZE, DMA_TO_DEVICE);
+ kmem_cache_free(cache, _req);
+}
+
+static void *qat_mempool_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ struct qat_algif_cy_ctx *ctx = pool_data;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct kmem_cache *cache = ctx->cache;
+ struct qat_algif_request *req;
+
+ req = kmem_cache_alloc_node(cache, gfp_mask,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (req) {
+ req->src_paddr = dma_map_single(&GET_DEV(accel_dev), &req->bufl,
+ BUF_REQ_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ req->src_paddr))) {
+ kmem_cache_free(cache, req);
+ return NULL;
+ }
+ }
+ return req;
+}
+
+static int qat_mempool_create_pool(struct qat_algif_cy_ctx *ctx,
+ int num_elems, size_t size)
+{
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "qat_algif_sym_pool_%d", pool_id++);
+ ctx->cache = kmem_cache_create(buf, size, 0, SLAB_HWCACHE_ALIGN,
+ qat_cache_constructor);
+ if (unlikely(!ctx->cache))
+ return -ENOMEM;
+
+ ctx->pool = mempool_create_node(num_elems, qat_mempool_alloc,
+ qat_mempool_free, ctx, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+
+ if (unlikely(!ctx->pool)) {
+ kmem_cache_destroy(ctx->cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qat_mempool_destroy_pool(struct qat_algif_cy_ctx *ctx)
+{
+ if (ctx->pool)
+ mempool_destroy(ctx->pool);
+
+ if (ctx->cache)
+ kmem_cache_destroy(ctx->cache);
+
+ ctx->cache = NULL;
+ ctx->pool = NULL;
+}
+
+void qat_user_callback(void *_resp)
+{
+ struct icp_qat_fw_la_resp *resp = ACCESS_ONCE(_resp);
+ struct qat_algif_request *req =
+ (struct qat_algif_request *)resp->opaque_data;
+ struct qat_algif_cy_ctx *ctx = req->ctx;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct qat_alg_buf_list *bufl = &req->bufl;
+ struct qat_alg_buf *buf = bufl->bufers;
+ int i;
+
+ for (i = 0; i < bufl->num_bufs; i++, buf++)
+ dma_unmap_single(dev, buf->addr, buf->len, DMA_BIDIRECTIONAL);
+
+ req->resp.comn_resp = resp->comn_resp;
+ atomic_set(&req->done, 1);
+ atomic_set(&ctx->data_available, 1);
+}
+
+static int qat_sendmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t size)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct qat_algif_cy_ctx *ctx = ask->private;
+ struct qat_algif_request *req;
+ int ret = -EINVAL;
+
+ lock_sock(sk);
+ if (unlikely(!ctx || !ctx->pool))
+ goto out;
+
+ if (ctx->current_req) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (unlikely(size != sizeof(struct icp_qat_fw_la_bulk_req)))
+ goto out;
+
+ ret = memcpy_fromiovec((unsigned char *)&ctx->req, msg->msg_iov, size);
+ if (ret)
+ goto out;
+
+ req = mempool_alloc(ctx->pool, GFP_KERNEL);
+ if (!req) {
+ pr_err("QAT: user mempool alloc failed\n");
+ ctx->current_req = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+ req->data_len = ctx->req.comn_mid.src_length;
+ if (unlikely(!req->data_len))
+ goto out;
+ ctx->req.comn_mid.src_length = 0;
+ req->resp.opaque_data = ctx->req.comn_mid.opaque_data;
+ ctx->req.comn_mid.opaque_data = (uint64_t)(__force long)req;
+ ctx->req.comn_mid.src_data_addr = req->src_paddr;
+ ctx->req.comn_mid.dest_data_addr = req->src_paddr;
+ ctx->req.cd_pars.u.s.content_desc_addr = ctx->ctx_paddr;
+ req->ctx = ctx;
+ req->bufl.num_bufs = 0;
+ atomic_set(&req->done, 0);
+ ctx->current_req = req;
+ ret = size;
+out:
+ release_sock(sk);
+ return ret;
+}
+
+static ssize_t qat_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct qat_algif_cy_ctx *ctx = ask->private;
+ struct qat_algif_request *req;
+ struct device *dev;
+ struct qat_alg_buf_list *bufl;
+ struct qat_alg_buf *buf;
+ int ret = -EFAULT, i, ctr = 0;
+
+ lock_sock(sk);
+ if (unlikely(!ctx))
+ goto out;
+
+ req = ctx->current_req;
+ if (unlikely(!req || !req->data_len))
+ goto out;
+
+ dev = &GET_DEV(ctx->inst->accel_dev);
+ bufl = &req->bufl;
+ buf = bufl->bufers + bufl->num_bufs;
+ buf->addr = dma_map_single(dev, page_address(page) + offset, size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, buf->addr))) {
+ dev_err(dev, "QAT: failed to dma_map error\n");
+ if (bufl->num_bufs)
+ goto unmap_out;
+ goto out;
+ }
+ buf->len = size;
+ bufl->num_bufs++;
+ if (unlikely(bufl->num_bufs >= SYM_BUF_NUM)) {
+ pr_err("QAT: too many user buffers\n");
+ goto unmap_out;
+ }
+ req->data_len -= size;
+ if (!(flags & MSG_SENDPAGE_NOTLAST)) {
+ /* It's much easier to keep retrying here
+ * than to go back to userspace and retry from there */
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx,
+ (uint32_t *)&ctx->req);
+ } while (ret == -EAGAIN && ctr++ < 10);
+ if (ret == -EAGAIN) {
+ ctr = 0;
+ do {
+ usleep_range(10, 20);
+ ret = adf_send_message(ctx->inst->sym_tx,
+ (uint32_t *)&ctx->req);
+ } while (ret == -EAGAIN && ctr++ < 300);
+ }
+ if (ret == -EAGAIN) {
+ pr_err("QAT: Can't put user msg\n");
+ goto unmap_out;
+ }
+ list_add(&req->list, &ctx->queue);
+ ctx->current_req = NULL;
+ WARN_ON(req->data_len);
+ req->data_len = 0;
+ }
+ ret = size;
+ goto out;
+unmap_out:
+ for (i = 0; i < bufl->num_bufs; i++) {
+ buf = bufl->bufers + i;
+ if (!dma_mapping_error(dev, buf->addr))
+ dma_unmap_single(dev, buf->addr, buf->len,
+ DMA_BIDIRECTIONAL);
+ }
+ mempool_free(req, ctx->pool);
+out:
+ release_sock(sk);
+ return ret;
+}
+
+static unsigned int qat_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct qat_algif_cy_ctx *ctx = ask->private;
+ unsigned int mask = 0;
+
+ if (unlikely(!ctx))
+ return 0;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
+ if (atomic_read(&ctx->data_available))
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+}
+
+static int qat_recvmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t ignored, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct qat_algif_cy_ctx *ctx = ask->private;
+ struct qat_algif_request *req;
+ struct list_head *list_ptr, *tmp;
+ struct iovec *iov = msg->msg_iov;
+ unsigned long iovlen = msg->msg_iovlen;
+ int ret = 0;
+
+ lock_sock(sk);
+
+ if (unlikely(!ctx || !ctx->pool))
+ goto out;
+
+ if (!atomic_read(&ctx->data_available))
+ goto out;
+
+ for (; iovlen > 0; iovlen--, iov++) {
+ unsigned long len = iov->iov_len;
+ char __user *ptr = iov->iov_base;
+ struct icp_qat_fw_la_resp *resp;
+
+ if (unlikely(len < sizeof(*resp))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ list_for_each_safe(list_ptr, tmp, &ctx->queue) {
+ req = list_entry(list_ptr, struct qat_algif_request,
+ list);
+ if (!atomic_read(&req->done))
+ break;
+ if (unlikely(len < sizeof(*resp))) {
+ WARN(len, "buff size not msg size aligned\n");
+ break;
+ }
+ resp = &req->resp;
+ if (copy_to_user(ptr, (void *)resp, sizeof(*resp)))
+ goto out;
+ ret += sizeof(*resp);
+ len -= sizeof(*resp);
+ ptr += sizeof(*resp);
+ list_del(list_ptr);
+ mempool_free(req, ctx->pool);
+ }
+ }
+out:
+ atomic_set(&ctx->data_available, list_empty(&ctx->queue) ? 0 : 1);
+ release_sock(sk);
+ return ret;
+}
+
+static struct proto_ops algif_qat_ops = {
+ .family = PF_ALG,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+ .release = af_alg_release,
+ .sendmsg = qat_sendmsg,
+ .sendpage = qat_sendpage,
+ .recvmsg = qat_recvmsg,
+ .poll = qat_poll,
+};
+
+static void *qat_bind(const char *name, u32 type, u32 mask)
+{
+ struct qat_crypto_instance *inst;
+ struct qat_algif_cy_ctx *ctx = NULL;
+ struct device *dev;
+
+ if (strcmp(name, "sym_crypto"))
+ return ERR_PTR(-EINVAL);
+
+ inst = qat_crypto_get_user_instance();
+ if (!inst)
+ return ERR_PTR(-EFAULT);
+
+ dev = &GET_DEV(inst->accel_dev);
+ ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, dev_to_node(dev));
+ if (!ctx)
+ goto err;
+
+ ctx->inst = inst;
+ INIT_LIST_HEAD(&ctx->queue);
+ ctx->ctx_paddr = dma_map_single(dev, &ctx->crypto_ctx,
+ sizeof(struct crypto_ctx),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, ctx->ctx_paddr)))
+ goto err2;
+
+ if (qat_mempool_create_pool(ctx, 512, sizeof(struct qat_algif_request)))
+ goto err3;
+
+ return ctx;
+err3:
+ dma_unmap_single(dev, ctx->ctx_paddr, sizeof(struct crypto_ctx),
+ DMA_TO_DEVICE);
+err2:
+ kfree(ctx);
+err:
+ qat_crypto_put_instance(inst);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int qat_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+ struct qat_algif_cy_ctx *ctx = private;
+
+ if (!ctx)
+ return -EINVAL;
+ memcpy(&ctx->crypto_ctx, key, keylen);
+ return 0;
+}
+
+static void qat_release(void *private)
+{
+ struct qat_algif_cy_ctx *ctx = private;
+ struct device *dev;
+ struct qat_algif_request *req;
+ struct list_head *list_ptr, *tmp;
+ int ctr = 0;
+
+ if (!ctx)
+ return;
+
+ /* wait for outstanding requests */
+ while (!list_empty(&ctx->queue) && ctr++ < 100)
+ msleep(300);
+
+ dev = &GET_DEV(ctx->inst->accel_dev);
+ dma_unmap_single(dev, ctx->ctx_paddr, sizeof(struct crypto_ctx),
+ DMA_TO_DEVICE);
+ list_for_each_safe(list_ptr, tmp, &ctx->queue) {
+ req = list_entry(list_ptr, struct qat_algif_request, list);
+ list_del(list_ptr);
+ mempool_free(req, ctx->pool);
+ }
+ qat_mempool_destroy_pool(ctx);
+ qat_crypto_put_instance(ctx->inst);
+ memset(ctx, '\0', sizeof(*ctx));
+ kfree(ctx);
+}
+
+static void qat_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct qat_algif_cy_ctx *ctx = ask->private;
+
+ if (atomic_read(&ctx->data_available))
+ pr_info("QAT: still have data.\n");
+
+ af_alg_release_parent(sk);
+}
+
+static int qat_accept(void *private, struct sock *sk)
+{
+ struct qat_algif_cy_ctx *ctx = private;
+ struct alg_sock *ask = alg_sk(sk);
+
+ ask->private = ctx;
+ sk->sk_destruct = qat_sock_destruct;
+ return 0;
+}
+
+static const struct af_alg_type algif_type_qat = {
+ .bind = qat_bind,
+ .release = qat_release,
+ .setkey = qat_setkey,
+ .accept = qat_accept,
+ .ops = &algif_qat_ops,
+ .name = "qat_sym",
+ .owner = THIS_MODULE
+};
+
+int __init algif_qat_init(void)
+{
+ return af_alg_register_type(&algif_type_qat);
+}
+
+void __exit algif_qat_exit(void)
+{
+ af_alg_unregister_type(&algif_type_qat);
+}
diff --git a/drivers/crypto/qat/qat_common/algif_qat_asym.c b/drivers/crypto/qat/qat_common/algif_qat_asym.c
new file mode 100644
index 0000000..164f96d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/algif_qat_asym.c
@@ -0,0 +1,791 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <crypto/if_alg.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <net/sock.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_bufs.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_pke.h"
+
+static unsigned int pool_id;
+#define NUM_PARAMS 8
+#define INPUT_SIZE (8 * NUM_PARAMS)
+#define ALIGN_PKE_ADDR(addr) ((void *)ALIGN((uint64_t)addr, 64))
+struct qat_algif_asym_cy_ctx;
+
+struct qat_asym_algif_req {
+ uint64_t in_params_tab[NUM_PARAMS];
+ uint64_t out_params_tab[NUM_PARAMS];
+ dma_addr_t in_paddr;
+ dma_addr_t out_paddr;
+ void *in_params[NUM_PARAMS];
+ void *out_params[NUM_PARAMS];
+ uint64_t in_params_v[NUM_PARAMS];
+ uint64_t out_params_v[NUM_PARAMS];
+ struct icp_qat_fw_pke_request pke_req;
+ struct icp_qat_fw_pke_resp resp;
+ dma_addr_t pke_req_paddr;
+ unsigned int in_param_sz;
+ unsigned int out_param_sz;
+ struct qat_algif_asym_cy_ctx *ctx;
+ atomic_t done;
+ struct list_head list;
+ struct list_head chain;
+} __packed __aligned(64);
+
+struct qat_algif_asym_cy_ctx {
+ struct qat_crypto_instance *inst;
+ struct kmem_cache *cache;
+ mempool_t *pool;
+ struct list_head queue;
+ atomic_t data_available;
+};
+
+static unsigned int pke_param_sizes[] = {
+ 16, 20, 24, 28, 32, 48,
+ 64, 72, 96, 128, 192,
+ 256, 320, 384, 448, 512
+};
+
+static unsigned int qat_align_pke_param_size(unsigned int size)
+{
+ unsigned int mod = size % 64;
+ unsigned int diff = mod ? 64 - mod : 0;
+
+ return size + diff;
+}
+
+static unsigned int qat_get_pke_input_param_size(unsigned int val,
+ unsigned int index)
+{
+ unsigned int mask = 0xf << (index * 4);
+
+ return pke_param_sizes[(val & mask) >> (index * 4)];
+}
+
+static unsigned int qat_get_pke_output_param_size(unsigned int val,
+ unsigned int index)
+{
+ if (index > 3)
+ index = 3;
+ return qat_get_pke_input_param_size(val, index);
+}
+
+static void qat_cache_constructor(void *v)
+{
+ memset(v, '\0', sizeof(struct qat_asym_algif_req));
+}
+
+static void qat_mempool_free(void *_req, void *pool_data)
+{
+ struct qat_algif_asym_cy_ctx *ctx = pool_data;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct kmem_cache *cache = ctx->cache;
+ struct qat_asym_algif_req *req = _req;
+
+ dma_unmap_single(&GET_DEV(accel_dev), req->in_paddr, INPUT_SIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_single(&GET_DEV(accel_dev), req->out_paddr, INPUT_SIZE,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(&GET_DEV(accel_dev), req->pke_req_paddr,
+ sizeof(struct icp_qat_fw_pke_request), DMA_TO_DEVICE);
+ kmem_cache_free(cache, _req);
+}
+
+static void *qat_mempool_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ struct qat_algif_asym_cy_ctx *ctx = pool_data;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct kmem_cache *cache = ctx->cache;
+ struct qat_asym_algif_req *req;
+
+ req = kmem_cache_alloc_node(cache, gfp_mask,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (req) {
+ req->in_paddr = dma_map_single(&GET_DEV(accel_dev),
+ req->in_params_tab,
+ INPUT_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ req->in_paddr)))
+ goto err_free;
+
+ req->out_paddr = dma_map_single(&GET_DEV(accel_dev),
+ req->out_params_tab,
+ INPUT_SIZE, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ req->out_paddr)))
+ goto err_unmap_input;
+
+ req->pke_req_paddr =
+ dma_map_single(&GET_DEV(accel_dev),
+ &req->pke_req,
+ sizeof(struct icp_qat_fw_pke_request),
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ req->pke_req_paddr)))
+ goto err_unmap_output;
+ }
+ return req;
+err_unmap_output:
+ dma_unmap_single(&GET_DEV(accel_dev), req->out_paddr,
+ sizeof(req->out_params_tab), DMA_BIDIRECTIONAL);
+err_unmap_input:
+ dma_unmap_single(&GET_DEV(accel_dev), req->in_paddr,
+ sizeof(req->in_params_tab), DMA_TO_DEVICE);
+err_free:
+ kmem_cache_free(cache, req);
+ return NULL;
+}
+
+static int qat_mempool_create_pool(struct qat_algif_asym_cy_ctx *ctx,
+ int num_elems, size_t size)
+{
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "qat_algif_asym_pool_%d", pool_id++);
+ ctx->cache = kmem_cache_create(buf, size, 0, SLAB_HWCACHE_ALIGN,
+ qat_cache_constructor);
+ if (unlikely(!ctx->cache))
+ return -ENOMEM;
+
+ ctx->pool = mempool_create_node(num_elems, qat_mempool_alloc,
+ qat_mempool_free, ctx, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (unlikely(!ctx->pool)) {
+ kmem_cache_destroy(ctx->cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qat_mempool_destroy_pool(struct qat_algif_asym_cy_ctx *ctx)
+{
+ if (ctx->pool)
+ mempool_destroy(ctx->pool);
+
+ if (ctx->cache)
+ kmem_cache_destroy(ctx->cache);
+
+ ctx->cache = NULL;
+ ctx->pool = NULL;
+}
+
+void qat_user_asym_callback(void *_resp)
+{
+ struct icp_qat_fw_pke_resp *resp = ACCESS_ONCE(_resp);
+ struct qat_asym_algif_req *req =
+ (struct qat_asym_algif_req *)resp->opaque_data;
+ struct qat_algif_asym_cy_ctx *ctx = req->ctx;
+
+ req->resp.pke_resp_hdr.comn_resp_flags =
+ resp->pke_resp_hdr.comn_resp_flags;
+ atomic_set(&req->done, 1);
+ atomic_set(&ctx->data_available, 1);
+}
+
+static void qat_unmap_input_params(struct qat_asym_algif_req *req)
+{
+ struct adf_accel_dev *accel_dev = req->ctx->inst->accel_dev;
+ int i;
+
+ for (i = 0; i < req->pke_req.input_param_count; i++) {
+ int size = qat_get_pke_input_param_size(req->in_param_sz, i);
+
+ if (!req->in_params_tab[i])
+ break;
+ dma_unmap_single(&GET_DEV(accel_dev), req->in_params_tab[i],
+ size, DMA_TO_DEVICE);
+ kfree(req->in_params[i]);
+ }
+}
+
+static void qat_unmap_output_params(struct qat_asym_algif_req *req, int cp)
+{
+ struct adf_accel_dev *accel_dev = req->ctx->inst->accel_dev;
+ int i;
+
+ for (i = 0; i < req->pke_req.output_param_count; i++) {
+ int size = qat_get_pke_output_param_size(req->out_param_sz, i);
+
+ if (!req->out_params_tab[i])
+ break;
+ if (cp)
+ if (copy_to_user((void __user *)req->out_params_v[i],
+ ALIGN_PKE_ADDR(req->out_params[i]),
+ size))
+ pr_err("QAT: Failed to copy output param\n");
+
+ dma_unmap_single(&GET_DEV(accel_dev), req->out_params_tab[i],
+ size, DMA_FROM_DEVICE);
+ kfree(req->out_params[i]);
+ }
+}
+
+/*
+ * Copy the table of user-space input parameter pointers, then copy and
+ * DMA-map each input parameter buffer for the device.
+ * On success the request's src_data_addr is rewritten to the device
+ * address of the parameter table.  On failure everything allocated or
+ * mapped so far is released, the table is left cleared so that a later
+ * qat_unmap_input_params() call is a harmless no-op, and a negative
+ * errno is returned.
+ */
+static int qat_map_input_params(struct qat_asym_algif_req *req)
+{
+	struct adf_accel_dev *accel_dev = req->ctx->inst->accel_dev;
+	void __user *addr;
+	unsigned int i = 0, x, num_param = req->pke_req.input_param_count;
+	int ret = -EINVAL;
+
+	/* Requests come from a mempool, so the table may hold stale data.
+	 * Clear it up front: an early failure must not let the error path
+	 * in the caller unmap/free garbage. */
+	for (x = 0; x < NUM_PARAMS; x++) {
+		req->in_params_tab[x] = 0;
+		req->in_params[x] = NULL;
+	}
+
+	if (unlikely(num_param > NUM_PARAMS)) {
+		pr_err("QAT: too many input params: %d\n", num_param);
+		goto err;
+	}
+
+	addr = (void __user *)(long)req->pke_req.pke_mid.src_data_addr;
+	if (copy_from_user((void *)req->in_params_v, addr,
+			   sizeof(uint64_t) * num_param)) {
+		pr_err("QAT: copy input params table from user failed\n");
+		ret = -EFAULT;
+		goto err;
+	}
+	req->in_param_sz = req->pke_req.input_param_size;
+	req->pke_req.input_param_size = 0;
+	for (i = 0; i < num_param; i++) {
+		int size = qat_get_pke_input_param_size(req->in_param_sz, i);
+
+		if (unlikely(size > 512 || !size)) {
+			pr_err("QAT: invalid input param size: %d\n", size);
+			ret = -EINVAL;
+			goto err;
+		}
+		req->in_params[i] =
+			kmalloc_node(qat_align_pke_param_size(size),
+				     GFP_KERNEL,
+				     dev_to_node(&GET_DEV(accel_dev)));
+		if (unlikely(!req->in_params[i])) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		if (copy_from_user(ALIGN_PKE_ADDR(req->in_params[i]),
+				   (void __user *)req->in_params_v[i], size)) {
+			pr_err("QAT: copy input parameter from user failed\n");
+			/* Not mapped yet - free here; the unwind loop below
+			 * only covers fully mapped entries (x < i). */
+			kfree(req->in_params[i]);
+			req->in_params[i] = NULL;
+			ret = -EFAULT;
+			goto err;
+		}
+		req->in_params_tab[i] =
+			dma_map_single(&GET_DEV(accel_dev),
+				       ALIGN_PKE_ADDR(req->in_params[i]),
+				       size, DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+					       req->in_params_tab[i]))) {
+			pr_err("QAT: failed to map input param %d size %d\n",
+			       i, size);
+			/* Mapping failed - release the buffer ourselves
+			 * (previously leaked) and clear the slot. */
+			kfree(req->in_params[i]);
+			req->in_params[i] = NULL;
+			req->in_params_tab[i] = 0;
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	ACCESS_ONCE(req->pke_req.pke_mid.src_data_addr) = req->in_paddr;
+	return 0;
+err:
+	pr_err("QAT: Failed to map input parameters\n");
+	for (x = 0; x < i; x++) {
+		int size = qat_get_pke_input_param_size(req->in_param_sz, x);
+
+		if (!dma_mapping_error(&GET_DEV(accel_dev),
+				       req->in_params_tab[x]))
+			dma_unmap_single(&GET_DEV(accel_dev),
+					 req->in_params_tab[x],
+					 size, DMA_TO_DEVICE);
+		kfree(req->in_params[x]);
+		/* Clear the slot so a later qat_unmap_input_params() call
+		 * (e.g. sendmsg's out_free path) cannot double-free. */
+		req->in_params_tab[x] = 0;
+		req->in_params[x] = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Copy the table of user-space output parameter pointers and DMA-map a
+ * kernel bounce buffer for every output parameter.
+ * On success the request's dest_data_addr is rewritten to the device
+ * address of the parameter table.  On failure everything allocated or
+ * mapped so far is released, the table is left cleared so that a later
+ * qat_unmap_output_params() call is a harmless no-op, and a negative
+ * errno is returned.
+ */
+static int qat_map_output_params(struct qat_asym_algif_req *req)
+{
+	struct adf_accel_dev *accel_dev = req->ctx->inst->accel_dev;
+	uint64_t __user *addr;
+	unsigned int i = 0, x, num_param = req->pke_req.output_param_count;
+	int ret = -EINVAL;
+
+	/* Requests come from a mempool, so the table may hold stale data.
+	 * Clear it up front: an early failure must not let the error path
+	 * in the caller unmap/free garbage. */
+	for (x = 0; x < NUM_PARAMS; x++) {
+		req->out_params_tab[x] = 0;
+		req->out_params[x] = NULL;
+	}
+
+	if (unlikely(num_param > NUM_PARAMS)) {
+		pr_err("QAT: too many output params: %d\n", num_param);
+		goto err;
+	}
+
+	addr = (void __user *)(long)req->pke_req.pke_mid.dest_data_addr;
+	if (copy_from_user((void *)req->out_params_v, addr,
+			   sizeof(uint64_t) * num_param)) {
+		pr_err("QAT: copy output params table from user failed\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	req->out_param_sz = req->pke_req.output_param_size;
+	req->pke_req.output_param_size = 0;
+	for (i = 0; i < num_param; i++) {
+		int size = qat_get_pke_output_param_size(req->out_param_sz, i);
+
+		/* Reject zero-sized params too, matching the input path. */
+		if (unlikely(size > 512 || !size)) {
+			pr_err("QAT: invalid output param size: %d\n", size);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		req->out_params[i] =
+			kmalloc_node(qat_align_pke_param_size(size),
+				     GFP_KERNEL,
+				     dev_to_node(&GET_DEV(accel_dev)));
+		if (unlikely(!req->out_params[i])) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		req->out_params_tab[i] =
+			dma_map_single(&GET_DEV(accel_dev),
+				       ALIGN_PKE_ADDR(req->out_params[i]),
+				       size, DMA_BIDIRECTIONAL);
+
+		if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+					       req->out_params_tab[i]))) {
+			/* Message said "input param" by copy/paste mistake. */
+			pr_err("QAT: failed to map output param %d size %d\n",
+			       i, size);
+			/* Mapping failed - release the buffer ourselves
+			 * (previously leaked) and clear the slot. */
+			kfree(req->out_params[i]);
+			req->out_params[i] = NULL;
+			req->out_params_tab[i] = 0;
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	ACCESS_ONCE(req->pke_req.pke_mid.dest_data_addr) = req->out_paddr;
+	return 0;
+err:
+	pr_err("QAT: Failed to map output parameters\n");
+	for (x = 0; x < i; x++) {
+		/* Was qat_get_pke_input_param_size(): unmapping with the
+		 * wrong size table could corrupt the DMA bookkeeping. */
+		int size = qat_get_pke_output_param_size(req->out_param_sz, x);
+
+		if (!dma_mapping_error(&GET_DEV(accel_dev),
+				       req->out_params_tab[x]))
+			dma_unmap_single(&GET_DEV(accel_dev),
+					 req->out_params_tab[x], size,
+					 DMA_BIDIRECTIONAL);
+		kfree(req->out_params[x]);
+		/* Clear the slot so a later qat_unmap_output_params() call
+		 * cannot double-free. */
+		req->out_params_tab[x] = 0;
+		req->out_params[x] = NULL;
+	}
+	return ret;
+}
+
+/*
+ * sendmsg() handler: accepts exactly one iovec holding a single
+ * struct icp_qat_fw_pke_request, DMA-maps its input/output parameter
+ * lists, follows the optional next_req_adr chain of further user-space
+ * requests (mapping each and rewriting next_req_adr to the device
+ * address of the next request), then submits the head request to the
+ * PKE transmit ring.
+ * Returns the number of request bytes consumed or a negative errno.
+ */
+static int qat_asym_sendmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t not_used)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct qat_algif_asym_cy_ctx *ctx = ask->private;
+	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+	struct qat_asym_algif_req *req, *req_chain;
+	struct icp_qat_fw_pke_request *pke_req_prev;
+	struct iovec *iov = msg->msg_iov;
+	unsigned long iovlen = msg->msg_iovlen;
+	unsigned long data_len = iov->iov_len;
+	char __user *data = iov->iov_base;
+	char __user *pke_req_chain;
+	dma_addr_t pke_req_prev_paddr;
+	struct list_head *list_ptr, *tmp;
+	int ret = -EINVAL, copied = 0, ctr = 0, num_reqs = 0;
+
+	lock_sock(sk);
+	/* NOTE(review): ctx is already dereferenced above (ctx->inst);
+	 * if ctx can truly be NULL this check comes too late - verify. */
+	if (unlikely(!ctx || !ctx->pool))
+		goto out;
+
+	/* Exactly one request structure per sendmsg() call. */
+	if (unlikely(iovlen != 1))
+		goto out;
+
+	if (unlikely(data_len != sizeof(struct icp_qat_fw_pke_request)))
+		goto out;
+
+	req = mempool_alloc(ctx->pool, GFP_KERNEL);
+	if (!req) {
+		pr_err("QAT: user mempool alloc failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (copy_from_user((void *)&req->pke_req, data, data_len)) {
+		pr_err("QAT: copy data from user failed\n");
+		ret = -EFAULT;
+		/* NOTE(review): out_free unmaps params that were never
+		 * mapped for this req - relies on the mempool object's
+		 * stale param tables being benign; verify. */
+		goto out_free;
+	}
+	copied += data_len;
+	req->ctx = ctx;
+	ret = qat_map_input_params(req);
+	if (ret)
+		goto out_free;
+	ret = qat_map_output_params(req);
+	if (ret)
+		goto out_free;
+	req->pke_req.pke_hdr.cd_pars.content_desc_addr =
+		accel_dev->fw_loader->mmp_addr;
+	/* Preserve the caller's opaque data in our response copy and use
+	 * the field to find this request again in the callback. */
+	req->resp.opaque_data = req->pke_req.pke_mid.opaque_data;
+	req->pke_req.pke_mid.opaque_data = (uint64_t)(__force long)req;
+	atomic_set(&req->done, 0);
+	INIT_LIST_HEAD(&req->chain);
+	pke_req_chain = (char __user *)req->pke_req.next_req_adr;
+	pke_req_prev = &req->pke_req;
+
+	/* Walk the user-space chain of follow-on requests. */
+	while (pke_req_chain) {
+		/* NOTE(review): post-increment with '>' admits
+		 * NUM_PARAMS + 1 chained requests - confirm whether the
+		 * bound is meant to be inclusive. */
+		if (unlikely(num_reqs++ > NUM_PARAMS)) {
+			pr_err("QAT: too many chained requests: %d\n",
+			       num_reqs);
+			ret = -EINVAL;
+			goto out_free_chain;
+		}
+		req_chain = mempool_alloc(ctx->pool, GFP_KERNEL);
+		if (!req_chain) {
+			pr_err("QAT: user mempool alloc failed\n");
+			ret = -ENOMEM;
+			goto out_free_chain;
+		}
+		list_add(&req_chain->chain, &req->chain);
+		if (copy_from_user((void *)&req_chain->pke_req, pke_req_chain,
+				   sizeof(req_chain->pke_req))) {
+			pr_err("QAT: copy from user failed\n");
+			ret = -EFAULT;
+			goto out_free_chain;
+		}
+		req_chain->ctx = ctx;
+		ret = qat_map_input_params(req_chain);
+		if (ret)
+			goto out_free_chain;
+		ret = qat_map_output_params(req_chain);
+		if (ret)
+			goto out_free_chain;
+		copied += sizeof(req_chain->pke_req);
+		/* Every chained response resolves back to the head req. */
+		req_chain->pke_req.pke_mid.opaque_data =
+			(uint64_t)(__force long)req;
+		req_chain->pke_req.pke_hdr.cd_pars.content_desc_addr =
+			accel_dev->fw_loader->mmp_addr;
+		/* Link the previous request to this one by device address
+		 * so the firmware can follow the chain. */
+		pke_req_prev_paddr = req_chain->pke_req_paddr;
+		pke_req_prev->next_req_adr = (uint64_t)(__force long)
+			pke_req_prev_paddr;
+		pke_req_prev = &req_chain->pke_req;
+		pke_req_chain = (char __user *)req_chain->pke_req.next_req_adr;
+	}
+	/* Ring may be momentarily full: a few back-to-back retries first. */
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx,
+				       (uint32_t *)&req->pke_req);
+	} while (ret == -EAGAIN && ctr++ < 10);
+	/* PKE jobs take longer. Try to wait for some to finish */
+	if (ret == -EAGAIN) {
+		ctr = 0;
+		do {
+			usleep_range(100, 120);
+			ret = adf_send_message(ctx->inst->pke_tx,
+					       (uint32_t *)&req->pke_req);
+		} while (ret == -EAGAIN && ctr++ < 20000);
+	}
+
+	if (ret == -EAGAIN) {
+		WARN_ONCE(true, "QAT: Can't put asym msg\n");
+		goto out_free_chain;
+	}
+	ret = copied;
+	/* Queue the request so recvmsg() can deliver the response. */
+	list_add(&req->list, &ctx->queue);
+	goto out;
+out_free_chain:
+	list_for_each_safe(list_ptr, tmp, &req->chain) {
+		req_chain = list_entry(list_ptr, struct qat_asym_algif_req,
+				       chain);
+		qat_unmap_input_params(req_chain);
+		qat_unmap_output_params(req_chain, 0);
+		list_del(list_ptr);
+		mempool_free(req_chain, ctx->pool);
+	}
+out_free:
+	qat_unmap_input_params(req);
+	qat_unmap_output_params(req, 0);
+	mempool_free(req, ctx->pool);
+out:
+	release_sock(sk);
+	return ret;
+}
+
+/*
+ * poll() handler: the socket is readable as soon as at least one
+ * submitted request has completed (data_available is set by the
+ * firmware response callback).  No write-side events are reported.
+ */
+static unsigned int qat_asym_poll(struct file *file, struct socket *sock,
+				  poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct qat_algif_asym_cy_ctx *ctx = alg_sk(sk)->private;
+
+	if (unlikely(!ctx))
+		return 0;
+
+	sock_poll_wait(file, sk_sleep(sk), wait);
+
+	return atomic_read(&ctx->data_available) ? (POLLIN | POLLRDNORM) : 0;
+}
+
+/*
+ * recvmsg() handler: walks the queue of submitted requests and, for
+ * each one whose response has arrived, copies the response descriptor
+ * into the caller's iovec and releases the request and all its chained
+ * requests, copying output parameters back to user space on the way.
+ * Returns the number of response bytes delivered or a negative errno.
+ */
+static int qat_asym_recvmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t ignored, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct qat_algif_asym_cy_ctx *ctx = ask->private;
+	struct qat_asym_algif_req *req, *req_chain;
+	struct list_head *list_ptr, *tmp;
+	struct list_head *list_ptr_chain, *tmp_chain;
+	struct iovec *iov = msg->msg_iov;
+	unsigned long iovlen = msg->msg_iovlen;
+	int ret = 0;
+
+	lock_sock(sk);
+
+	if (unlikely(!ctx || !ctx->pool))
+		goto out;
+
+	/* Nothing completed yet - return 0 rather than blocking. */
+	if (!atomic_read(&ctx->data_available))
+		goto out;
+
+	for (; iovlen > 0; iovlen--, iov++) {
+		unsigned long len = iov->iov_len;
+		char __user *ptr = iov->iov_base;
+		struct icp_qat_fw_pke_resp *resp;
+
+		list_for_each_safe(list_ptr, tmp, &ctx->queue) {
+			req = list_entry(list_ptr,
+					 struct qat_asym_algif_req,
+					 list);
+			if (!len)
+				break;
+			/* Each iovec entry must hold whole responses. */
+			if (unlikely(len < sizeof(*resp))) {
+				ret = -EINVAL;
+				goto out;
+			}
+			/* Deliver in queue order; stop at the first
+			 * request that is still outstanding. */
+			if (!atomic_read(&req->done))
+				break;
+			resp = &req->resp;
+			if (copy_to_user(ptr, (void *)resp, sizeof(*resp))) {
+				pr_err("QAT: copy to user failed\n");
+				ret = -EFAULT;
+				goto out;
+			}
+			/* Release chained requests first, copying their
+			 * output params back to the user (cp == 1). */
+			list_for_each_safe(list_ptr_chain,
+					   tmp_chain, &req->chain) {
+				req_chain =
+					list_entry(list_ptr_chain,
+						   struct qat_asym_algif_req,
+						   chain);
+				qat_unmap_input_params(req_chain);
+				qat_unmap_output_params(req_chain, 1);
+				list_del(list_ptr_chain);
+				mempool_free(req_chain, ctx->pool);
+			}
+			qat_unmap_input_params(req);
+			qat_unmap_output_params(req, 1);
+			list_del(list_ptr);
+			mempool_free(req, ctx->pool);
+			ret += sizeof(*resp);
+			len -= sizeof(*resp);
+			ptr += sizeof(*resp);
+		}
+	}
+out:
+	/* If something went wrong and there is still data
+	 * ready to be read we need to set the data_available
+	 * flag accordingly for the next poll to work */
+	atomic_set(&ctx->data_available, 0);
+	list_for_each(list_ptr, &ctx->queue) {
+		req = list_entry(list_ptr, struct qat_asym_algif_req, list);
+		if (atomic_read(&req->done)) {
+			atomic_set(&ctx->data_available, 1);
+			break;
+		}
+	}
+	release_sock(sk);
+	return ret;
+}
+
+/*
+ * Socket operations for the "qat_asym" AF_ALG type.  Only sendmsg,
+ * recvmsg, poll and release are meaningful; the remaining hooks are
+ * stubbed out with the sock_no_* helpers.
+ */
+static struct proto_ops algif_qat_asym_ops = {
+	.family		= PF_ALG,
+
+	.bind		= sock_no_bind,
+	.connect	= sock_no_connect,
+	.socketpair	= sock_no_socketpair,
+	.accept		= sock_no_accept,
+	.getname	= sock_no_getname,
+	.ioctl		= sock_no_ioctl,
+	.listen		= sock_no_listen,
+	.shutdown	= sock_no_shutdown,
+	.setsockopt	= sock_no_setsockopt,
+	.getsockopt	= sock_no_getsockopt,
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage,
+
+	.release	= af_alg_release,
+	.sendmsg	= qat_asym_sendmsg,
+	.recvmsg	= qat_asym_recvmsg,
+	.poll		= qat_asym_poll,
+};
+
+/*
+ * bind() handler for the "qat_asym" AF_ALG type: resolves the only
+ * supported name ("asym_crypto") to a user crypto instance and builds
+ * the per-socket context with its request mempool.
+ * Returns the context pointer or an ERR_PTR() code.
+ */
+static void *qat_asym_bind(const char *name, u32 type, u32 mask)
+{
+	struct qat_crypto_instance *inst;
+	struct qat_algif_asym_cy_ctx *ctx;
+	struct device *dev;
+
+	if (strcmp(name, "asym_crypto"))
+		return ERR_PTR(-EINVAL);
+
+	inst = qat_crypto_get_user_instance();
+	if (!inst)
+		/* NOTE(review): -EFAULT is an odd code for "no instance
+		 * available"; -ENODEV may fit better - confirm. */
+		return ERR_PTR(-EFAULT);
+
+	dev = &GET_DEV(inst->accel_dev);
+	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, dev_to_node(dev));
+	if (!ctx)
+		goto err;
+
+	INIT_LIST_HEAD(&ctx->queue);
+	ctx->inst = inst;
+	/* Pre-allocate up to 1024 request structures for this socket. */
+	if (qat_mempool_create_pool(ctx, 1024,
+				    sizeof(struct qat_asym_algif_req)))
+		goto err2;
+	return ctx;
+err2:
+	kfree(ctx);
+err:
+	qat_crypto_put_instance(inst);
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * release() handler: wait (bounded, 100 x 300ms) for queued requests to
+ * drain, then tear down whatever is still queued and free the context.
+ * NOTE(review): if requests are still in flight when the wait expires,
+ * their DMA buffers are unmapped and freed below while the device may
+ * still write to them - confirm the instance is quiesced first.
+ */
+static void qat_asym_release(void *private)
+{
+	struct qat_algif_asym_cy_ctx *ctx = private;
+	struct qat_asym_algif_req *req, *req_chain;
+	struct list_head *list_ptr, *tmp;
+	struct list_head *list_ptr_chain, *tmp_chain;
+	int ctr = 0;
+
+	if (!ctx)
+		return;
+
+	/* wait for outstanding requests */
+	while (!list_empty(&ctx->queue) && ctr++ < 100)
+		msleep(300);
+
+	/* Free anything still queued, dropping output data (cp == 0). */
+	list_for_each_safe(list_ptr, tmp, &ctx->queue) {
+		req = list_entry(list_ptr, struct qat_asym_algif_req, list);
+		list_for_each_safe(list_ptr_chain, tmp_chain, &req->chain) {
+			req_chain = list_entry(list_ptr_chain,
+					       struct qat_asym_algif_req,
+					       chain);
+			qat_unmap_input_params(req_chain);
+			qat_unmap_output_params(req_chain, 0);
+			list_del(list_ptr_chain);
+			mempool_free(req_chain, ctx->pool);
+		}
+		qat_unmap_input_params(req);
+		qat_unmap_output_params(req, 0);
+		list_del(list_ptr);
+		mempool_free(req, ctx->pool);
+	}
+	qat_mempool_destroy_pool(ctx);
+	qat_crypto_put_instance(ctx->inst);
+	memset(ctx, '\0', sizeof(*ctx));
+	kfree(ctx);
+}
+
+/*
+ * sk_destruct callback: log if completed responses were never read,
+ * then drop the reference on the parent socket.
+ */
+static void qat_asym_destruct(struct sock *sk)
+{
+	struct qat_algif_asym_cy_ctx *ctx = alg_sk(sk)->private;
+
+	if (atomic_read(&ctx->data_available))
+		pr_info("QAT: still have data.\n");
+
+	af_alg_release_parent(sk);
+}
+
+/*
+ * accept() handler: attach the bound context to the accepted socket
+ * and install its destructor.  Cannot fail.
+ */
+static int qat_asym_accept(void *private, struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+
+	ask->private = private;
+	sk->sk_destruct = qat_asym_destruct;
+	return 0;
+}
+
+/* AF_ALG socket type exposing QAT asymmetric crypto to user space. */
+static const struct af_alg_type algif_type_qat_asym = {
+	.bind = qat_asym_bind,
+	.release = qat_asym_release,
+	.accept = qat_asym_accept,
+	.ops = &algif_qat_asym_ops,
+	.name = "qat_asym",
+	.owner = THIS_MODULE
+};
+
+/* Register the "qat_asym" AF_ALG socket type with the crypto API. */
+int __init algif_qat_asym_init(void)
+{
+	return af_alg_register_type(&algif_type_qat_asym);
+}
+
+/* Unregister the "qat_asym" AF_ALG socket type on module unload. */
+void __exit algif_qat_asym_exit(void)
+{
+	af_alg_unregister_type(&algif_type_qat_asym);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 2269fda..79c6b19 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -62,6 +62,7 @@
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+#include "qat_bufs.h"
#define QAT_AES_HW_CONFIG_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
@@ -75,19 +76,6 @@
static atomic_t active_dev;
-struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
-} __packed;
-
-struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
@@ -123,11 +111,6 @@ struct qat_alg_session_ctx {
spinlock_t lock; /* protects qat_alg_session_ctx struct */
};
-static int get_current_node(void)
-{
- return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
switch (qat_hash_alg) {
@@ -500,9 +483,8 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
sizeof(struct icp_qat_fw_la_bulk_req));
} else {
/* new key */
- int node = get_current_node();
struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(node);
+ qat_crypto_get_kernel_instance();
if (!inst) {
spin_unlock(&ctx->lock);
return -EINVAL;
diff --git a/drivers/crypto/qat/qat_common/qat_bufs.h b/drivers/crypto/qat/qat_common/qat_bufs.h
new file mode 100644
index 0000000..dcab80d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_bufs.h
@@ -0,0 +1,65 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_BUF_H_
+#define _QAT_BUF_H_
+
+#include <linux/types.h>
+
+/* Single scatter-buffer descriptor handed to the QAT firmware. */
+struct qat_alg_buf {
+	uint32_t len;
+	uint32_t resrvd;
+	uint64_t addr;
+} __packed;
+
+/* List of scatter buffers for the firmware.  The 'bufers' spelling
+ * (sic) is kept because existing users in qat_algs.c rely on it. */
+struct qat_alg_buf_list {
+	uint64_t resrvd;
+	uint32_t num_bufs;
+	uint32_t num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+#endif
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-06 19:59 ` [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace Tadeusz Struk
@ 2014-11-07 2:56 ` Herbert Xu
2014-11-07 3:43 ` Tadeusz Struk
0 siblings, 1 reply; 15+ messages in thread
From: Herbert Xu @ 2014-11-07 2:56 UTC (permalink / raw)
To: Tadeusz Struk; +Cc: davem, linux-crypto, qat-linux
On Thu, Nov 06, 2014 at 11:59:53AM -0800, Tadeusz Struk wrote:
> Add new socket algif interface for userspace for symmetric and asymmetric
> crypto.
>
> Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
No this is not acceptable. algif is meant to expose generic
algorithms to user-space not a specific hardware implementation.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 2:56 ` Herbert Xu
@ 2014-11-07 3:43 ` Tadeusz Struk
2014-11-07 4:05 ` Herbert Xu
0 siblings, 1 reply; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-07 3:43 UTC (permalink / raw)
To: Herbert Xu; +Cc: davem, linux-crypto, qat-linux
Hi Herbert,
On 11/06/2014 06:56 PM, Herbert Xu wrote:
>> Add new socket algif interface for userspace for symmetric and asymmetric
>> > crypto.
>> >
>> > Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
> No this is not acceptable. algif is meant to expose generic
> algorithms to user-space not a specific hardware implementation.
Yes, I know that PF_ALG wasn't designed to expose hardware specific
implementation, but the problem is that currently there is no interface
that allows using HW acceleration for algorithms such as RSA, DSA, or
Diffie-Hellman.
Using PF_ALG works perfectly for this purpose. I can get around 130K RSA
decrypt operations per second with 1024 bits modulus. This also scales
very well for more accelerators.
PF_ALG is meant to be a crypto interface for user space and this is what
I'm using it for here :) Is there any way I can make it (more) acceptable?
Regards,
Tadeusz
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 3:43 ` Tadeusz Struk
@ 2014-11-07 4:05 ` Herbert Xu
2014-11-07 5:24 ` Tadeusz Struk
0 siblings, 1 reply; 15+ messages in thread
From: Herbert Xu @ 2014-11-07 4:05 UTC (permalink / raw)
To: Tadeusz Struk; +Cc: davem, linux-crypto, qat-linux
On Thu, Nov 06, 2014 at 07:43:23PM -0800, Tadeusz Struk wrote:
>
> Yes, I know that PF_ALG wasn't designed to expose hardware specific
> implementation, but the problem is that currently there is no interface
> that allows using HW acceleration for algorithms such as RSA, DSA, or
> Diffie-Hellman.
> Using PF_ALG works perfectly for this purpose. I can get around 130K RSA
> decrypt operations per second with 1024 bits modulus. This also scales
> very well for more accelerators.
> PF_ALG meant to be crypto interface for user space and this is what I'm
> using it for here :) Is there any way I can make it (more) acceptable?
I see. What we need to do is first fold the current asymmetric
crypto code under crypto/asymmetric into the crypto API properly
and then export that through algif.
You can then simply implement a hardware driver for such algorithms
as you currently do for other crypto API algorithms.
I'll try to work on that.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 4:05 ` Herbert Xu
@ 2014-11-07 5:24 ` Tadeusz Struk
2014-11-07 5:31 ` Herbert Xu
0 siblings, 1 reply; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-07 5:24 UTC (permalink / raw)
To: Herbert Xu; +Cc: davem, linux-crypto, qat-linux
On 11/06/2014 08:05 PM, Herbert Xu wrote:
> I see. What we need to do is first fold the current asymmetric
> crypto code under crypto/asymmetric into the crypto API properly
> and then export that through algif.
>
> You can then simply implement a hardware driver for such algorithms
> as you currently do for other crypto API algorithms.
>
> I'll try to work on that.
What about aead? Using my algif_qat I can build a single request to HW
that performs both encryption and authentication in one go, even for
things like aes-cbc-hmac-sha1. This allows me to get great performance
for authenticated encryption from user space.
Regards,
Tadeusz
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 5:24 ` Tadeusz Struk
@ 2014-11-07 5:31 ` Herbert Xu
2014-11-07 18:53 ` Tadeusz Struk
2014-11-07 19:48 ` Tadeusz Struk
0 siblings, 2 replies; 15+ messages in thread
From: Herbert Xu @ 2014-11-07 5:31 UTC (permalink / raw)
To: Tadeusz Struk; +Cc: davem, linux-crypto, qat-linux
On Thu, Nov 06, 2014 at 09:24:22PM -0800, Tadeusz Struk wrote:
>
> What about aead? Using my algif_qat I can build a single request to HW
> that performs both encryption and authentication in one go, even for
> things like aes-cbc-hmac-sha1. This allows me to get great performance
> for authenticated encryption from user space.
aead is simpler as it's already part of the API so we just need to
add algif_aead.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 5:31 ` Herbert Xu
@ 2014-11-07 18:53 ` Tadeusz Struk
2014-11-07 19:48 ` Tadeusz Struk
1 sibling, 0 replies; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-07 18:53 UTC (permalink / raw)
To: Herbert Xu; +Cc: davem, linux-crypto, qat-linux
Hi Herbert,
On 11/06/2014 09:31 PM, Herbert Xu wrote:
> On Thu, Nov 06, 2014 at 09:24:22PM -0800, Tadeusz Struk wrote:
>>
>> What about aead? Using my algif_qat I can build a single request to HW
>> that performs both encryption and authentication in one go, even for
>> things like aes-cbc-hmac-sha1. This allows me to get great performance
>> for authenticated encryption from user space.
>
> aead is simpler as it's already part of the API so we just need to
> add algif_aead.
>
> Cheers,
>
For the asymmetric crypto below are the operations and algorithms the
QAT HW can accelerate.
* DH:
1. Diffie-Hellman phase 1 operations as defined in the PKCS #3 standard.
2. Diffie-Hellman phase 2 operations as defined in the PKCS #3 standard.
* DSA:
1. Generate DSA P Parameter - performs FIPS 186-3 Appendix A.1.1.2 steps
11.4 (c = X mod 2q) and 11.5 (p = X - (c - 1)), and 11.7 (Test whether p
is prime)
2. Generate DSA G Parameter - performs FIPS 186-3 Appendix A.2.1, steps
1 (e = (p - 1)/q), 3 g = h^e mod p, and 4
3. Generate DSA Y Parameter - performs FIPS 186-3 section 4.1 (y = g^x
mod p)
4. Generate DSA R Signature - performs FIPS 186-3 Section 4.6: r = (g^k
mod p) mod q
5. Generate DSA S Signature - performs FIPS 186-3 Section 4.6: s =
(k^-1(z + xr)) mod q
6. Generate DSA R and S Signatures - performs FIPS 186-3 Section 4.6: r
= (g^k mod p) mod q, s = (k^-1(z + xr)) mod q
7. Verify DSA R and S signatures - performs FIPS 186-3 Section 4.7:
w = (s')^-1 mod q
u1 = (zw) mod q
u2 = ((r')w) mod q
v = (((g)^u1 (y)^u2) mod p) mod q
* RSA:
1. Generate RSA keys - as specified in the PKCS #1 V2.1 standard
2. Perform the RSA encrypt (verify) primitive
3. Perform the RSA decrypt (sign) primitive
* EC:
1. Elliptic Curve Point Multiplication as defined in ANSI X9.63 Annex D.3.2
2. Elliptic Curve Point Verification as defined in ANSI X9.62 Annex
A.4.2 steps a, b and c.
3. ECDH Point Multiplication as defined in ANSI X9.63 2001 section 5.4
4. ECDSA generate signature R as per ANSI X9.62 2005 section 7.3
5. ECDSA generate signature S as per ANSI X9.62 2005 section 7.3.
6. ECDSA generate signature R & S as per ANSI X9.62 2005 section 7.3.
7. ECDSA Verify as per ANSI X9.62 2005 section 7.4
* The QAT HW can also perform modular exponentiation operation: r =
(base ^ exponent) mod modulus, and modular inversion operation: r =
(1/A) mod modulus
* It can also probabilistically test if a number is prime using GCD,
Fermat, Miller-Rabin, and Lucas testing methods using all combinations
of these methods:
(1x GCD) + (1x Fermat) + (up to 50x Miller-Rabin rounds) + (1x Lucas)
* and it can also accelerate SSL, TLS and TLSv2 key generation and MGF1
as per RFC3447, and Extended Mask Generation Function.
Are you planning to add all of these to the API?
Maybe it will be easier to add a generic af_alg_type e.g "af_direct" for
the HW drivers to use to implement HW specific functionality for the
stuff that is not yet (or never will) on the API?
I can try to put something together.
Regards,
Tadeusz
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 5:31 ` Herbert Xu
2014-11-07 18:53 ` Tadeusz Struk
@ 2014-11-07 19:48 ` Tadeusz Struk
2014-11-14 1:19 ` Herbert Xu
1 sibling, 1 reply; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-07 19:48 UTC (permalink / raw)
To: Herbert Xu; +Cc: davem, linux-crypto, qat-linux
Hi,
On 11/06/2014 09:31 PM, Herbert Xu wrote:
> On Thu, Nov 06, 2014 at 09:24:22PM -0800, Tadeusz Struk wrote:
>>
>> What about aead? Using my algif_qat I can build a single request to HW
>> that performs both encryption and authentication in one go, even for
>> things like aes-cbc-hmac-sha1. This allows me to get great performance
>> for authenticated encryption from user space.
>
> aead is simpler as it's already part of the API so we just need to
> add algif_aead.
>
> Cheers,
>
Another thing is that the algif_skcipher works in a kind of synchronous way:
------- ----------
| app |--write()--> | socket | --> build_sgl()
------- ---------- |
<----------------------------------/
------- ---------- ------
| app |--read()---> | socket | --send_to_hw()--> | HW |
------- ---------- | ------
| |
| |
<-------------------------------wait() <--------/
What I do in the algif_qat is this:
------- ---------- ------
| app |--write()--> | socket | --send_to_hw()--> | HW |
------- ---------- | ------
<----------------------------------/ |
<-------------------------/
------- ----------
| app |--read()---> | socket |
------- ----------
|
<--------------------/
This way I can get much higher throughput than with algif_skcipher.
Regards,
Tadeusz
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-07 19:48 ` Tadeusz Struk
@ 2014-11-14 1:19 ` Herbert Xu
2014-11-14 1:25 ` Tadeusz Struk
0 siblings, 1 reply; 15+ messages in thread
From: Herbert Xu @ 2014-11-14 1:19 UTC (permalink / raw)
To: Tadeusz Struk; +Cc: davem, linux-crypto, qat-linux
On Fri, Nov 07, 2014 at 11:48:16AM -0800, Tadeusz Struk wrote:
>
> This way I can get much higher throughput than with algif_skcipher.
Yes that's a worthy optimisation. See if you can change the
current implementation (without changing the interface) to achieve
this.
Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-14 1:19 ` Herbert Xu
@ 2014-11-14 1:25 ` Tadeusz Struk
2014-11-14 1:29 ` Herbert Xu
0 siblings, 1 reply; 15+ messages in thread
From: Tadeusz Struk @ 2014-11-14 1:25 UTC (permalink / raw)
To: Herbert Xu; +Cc: davem, linux-crypto, qat-linux
Hi Herbert,
On 11/13/2014 05:19 PM, Herbert Xu wrote:
>> > This way I can get much higher throughput than with algif_skcipher.
> Yes that's a worthy optimisation. See if you can change the
> current implementation (without changing the interface) to achieve
> this.
Ok, I'll have a look. What about the asymmetric algorithms?
Regards,
Tadeusz
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH RFC 4/4] crypto: qat - Add new algif interface for userspace
2014-11-14 1:25 ` Tadeusz Struk
@ 2014-11-14 1:29 ` Herbert Xu
0 siblings, 0 replies; 15+ messages in thread
From: Herbert Xu @ 2014-11-14 1:29 UTC (permalink / raw)
To: Tadeusz Struk; +Cc: davem, linux-crypto, qat-linux
On Thu, Nov 13, 2014 at 05:25:29PM -0800, Tadeusz Struk wrote:
> Hi Herbert,
> On 11/13/2014 05:19 PM, Herbert Xu wrote:
> >> > This way I can get much higher throughput than with algif_skcipher.
> > Yes that's a worthy optimisation. See if you can change the
> > current implementation (without changing the interface) to achieve
> > this.
> Ok, I'll have a look. What about the asymmetric algorithms?
I will be taking a look at them but it'll be a while before I
get some free time to do so.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 15+ messages in thread