From: Stefan Hajnoczi <stefanha@gmail.com>
To: Gonglei <arei.gonglei@huawei.com>
Cc: qemu-devel@nongnu.org, virtio-dev@lists.oasis-open.org,
weidong.huang@huawei.com, claudio.fontana@huawei.com,
mst@redhat.com, xin.zeng@intel.com, hanweidong@huawei.com,
luonengjun@huawei.com, agraf@suse.de, nmorey@kalray.eu,
mike.caraman@nxp.com, stefanha@redhat.com,
jianjay.zhou@huawei.com, pbonzini@redhat.com,
peter.huangpeng@huawei.com, vincent.jardin@6wind.com,
wu.wubin@huawei.com, arei.gonglei@hotmail.com
Subject: Re: [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add data queue processing handler
Date: Sun, 16 Oct 2016 14:22:56 +0100 [thread overview]
Message-ID: <20161016132256.GB13294@stefanha-x1.localdomain> (raw)
In-Reply-To: <1476342726-104488-10-git-send-email-arei.gonglei@huawei.com>
[-- Attachment #1: Type: text/plain, Size: 15991 bytes --]
On Thu, Oct 13, 2016 at 03:12:03PM +0800, Gonglei wrote:
> Introduces VirtIOCryptoReq structure to store
> crypto request so that we can support sync and async
> crypto operation in the future.
What do you mean by "sync and async" operations?
>
> At present, we only support cipher and algorithm
> chainning.
s/chainning/chaining/
>
> Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> ---
> hw/virtio/virtio-crypto.c | 338 +++++++++++++++++++++++++++++++++++++-
> include/hw/virtio/virtio-crypto.h | 10 +-
> 2 files changed, 345 insertions(+), 3 deletions(-)
>
> diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> index db86796..094bc48 100644
> --- a/hw/virtio/virtio-crypto.c
> +++ b/hw/virtio/virtio-crypto.c
> @@ -306,6 +306,342 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> } /* end for loop */
> }
>
> +static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
> + VirtIOCryptoReq *req)
> +{
> + req->vcrypto = vcrypto;
> + req->vq = vq;
> + req->in = NULL;
> + req->in_iov = NULL;
> + req->in_num = 0;
> + req->in_len = 0;
> + req->flags = CRYPTODEV_BACKEND_ALG__MAX;
> + req->u.sym_op_info = NULL;
> +}
> +
> +static void virtio_crypto_free_request(VirtIOCryptoReq *req)
> +{
> + if (req) {
> + if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> + g_free(req->u.sym_op_info);
> + }
> + g_free(req);
> + }
> +}
> +
> +static void
> +virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
> + VirtIOCryptoReq *req,
> + uint32_t status,
> + CryptoDevBackendSymOpInfo *sym_op_info)
> +{
> + size_t s, len;
> +
> + if (status != VIRTIO_CRYPTO_OK) {
> + return;
> + }
> +
> + len = sym_op_info->dst_len;
> + /* Save the cipher result */
> + s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
> + if (s != len) {
> + virtio_error(vdev, "virtio-crypto dest data incorrect");
> + return;
> + }
> +
> + iov_discard_front(&req->in_iov, &req->in_num, len);
> +
> + if (sym_op_info->op_type ==
> + VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> + /* Save the digest result */
> + s = iov_from_buf(req->in_iov, req->in_num, 0,
> + sym_op_info->digest_result,
> + sym_op_info->digest_result_len);
> + if (s != sym_op_info->digest_result_len) {
> + virtio_error(vdev, "virtio-crypto digest result incorrect");
> + }
> + }
> +}
> +
> +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint32_t status)
> +{
> + VirtIOCrypto *vcrypto = req->vcrypto;
> + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +
> + if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> + virtio_crypto_sym_input_data_helper(vdev, req, status,
> + req->u.sym_op_info);
> + }
> + stl_he_p(&req->in->status, status);
This should be virtio_stl_p().
> + virtqueue_push(req->vq, &req->elem, req->in_len);
> + virtio_notify(vdev, req->vq);
> +}
> +
> +static VirtIOCryptoReq *
> +virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
> +{
> + VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
> +
> + if (req) {
> + virtio_crypto_init_request(s, vq, req);
> + }
> + return req;
> +}
> +
> +static CryptoDevBackendSymOpInfo *
> +virtio_crypto_sym_op_helper(VirtIODevice *vdev,
> + struct virtio_crypto_cipher_para *para,
> + uint32_t aad_len,
> + struct iovec *iov, unsigned int out_num,
> + uint32_t hash_result_len,
> + uint32_t hash_start_src_offset)
> +{
> + CryptoDevBackendSymOpInfo *op_info;
> + uint32_t src_len, dst_len;
> + uint32_t iv_len;
> + size_t max_len, curr_size = 0;
> + size_t s;
> +
> + iv_len = virtio_ldl_p(vdev, ¶->iv_len);
> + src_len = virtio_ldl_p(vdev, ¶->src_data_len);
> + dst_len = virtio_ldl_p(vdev, ¶->dst_data_len);
> +
> + max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
Integer overflow checks are needed here to prevent memory corruption
later on.
Imagine a 32-bit host where sizeof(max_len) == 4 and iv_len + aad_len +
... == UINT32_MAX + 1. The sum wraps around to 0, so the g_malloc0 below
will allocate only sizeof(CryptoDevBackendSymOpInfo) bytes instead of
the intended UINT32_MAX + 1 extra bytes, leading to out-of-bounds writes
later on.
> + op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
> + op_info->iv_len = iv_len;
> + op_info->src_len = src_len;
> + op_info->dst_len = dst_len;
> + op_info->aad_len = aad_len;
> + op_info->digest_result_len = hash_result_len;
> + op_info->hash_start_src_offset = hash_start_src_offset;
> + /* Handle the initilization vector */
> + if (op_info->iv_len > 0) {
> + DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
> + op_info->iv = op_info->data + curr_size;
> +
> + s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
> + if (unlikely(s != op_info->iv_len)) {
> + virtio_error(vdev, "virtio-crypto iv incorrect");
> + goto err;
> + }
> + iov_discard_front(&iov, &out_num, op_info->iv_len);
> + curr_size += op_info->iv_len;
> + }
> +
> + /* Handle additional authentication data if exist */
> + if (op_info->aad_len > 0) {
> + DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
> + op_info->aad_data = op_info->data + curr_size;
> +
> + s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
> + if (unlikely(s != op_info->aad_len)) {
> + virtio_error(vdev, "virtio-crypto additional auth data incorrect");
> + goto err;
> + }
> + iov_discard_front(&iov, &out_num, op_info->aad_len);
> +
> + curr_size += op_info->aad_len;
> + }
> +
> + /* Handle the source data */
> + if (op_info->src_len > 0) {
> + DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
> + op_info->src = op_info->data + curr_size;
> +
> + s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
> + if (unlikely(s != op_info->src_len)) {
> + virtio_error(vdev, "virtio-crypto source data incorrect");
> + goto err;
> + }
> + iov_discard_front(&iov, &out_num, op_info->src_len);
> +
> + curr_size += op_info->src_len;
> + }
> +
> + /* Handle the destination data */
> + op_info->dst = op_info->data + curr_size;
> + curr_size += op_info->dst_len;
> +
> + DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
> +
> + /* Handle the hash digest result */
> + if (hash_result_len > 0) {
> + DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
> + op_info->digest_result = op_info->data + curr_size;
> + }
> +
> + return op_info;
> +
> +err:
> + g_free(op_info);
> + return NULL;
> +}
> +
> +static int
> +virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
> + struct virtio_crypto_sym_data_req *req,
> + CryptoDevBackendSymOpInfo **sym_op_info,
> + struct iovec *iov, unsigned int out_num)
> +{
> + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> + uint32_t op_type;
> + CryptoDevBackendSymOpInfo *op_info;
> +
> + op_type = virtio_ldl_p(vdev, &req->op_type);
> +
> + if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
> + op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
> + 0, iov, out_num, 0, 0);
> + if (!op_info) {
> + return -EFAULT;
> + }
> + op_info->op_type = op_type;
> + } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> + uint32_t aad_len, hash_result_len;
> + uint32_t hash_start_src_offset;
> +
> + aad_len = virtio_ldl_p(vdev, &req->u.chain.para.aad_len);
> + hash_result_len = virtio_ldl_p(vdev,
> + &req->u.chain.para.hash_result_len);
> + hash_start_src_offset = virtio_ldl_p(vdev,
> + &req->u.chain.para.hash_start_src_offset);
> + /* cipher part */
> + op_info = virtio_crypto_sym_op_helper(vdev, &req->u.chain.para.cipher,
> + aad_len, iov, out_num,
> + hash_result_len,
> + hash_start_src_offset);
> + if (!op_info) {
> + return -EFAULT;
> + }
> + op_info->op_type = op_type;
> + } else {
> + /* VIRTIO_CRYPTO_SYM_OP_NONE */
> + error_report("virtio-crypto unsupported cipher type");
> + return -VIRTIO_CRYPTO_NOTSUPP;
> + }
> +
> + *sym_op_info = op_info;
> +
> + return 0;
> +}
> +
> +static int
> +virtio_crypto_handle_request(VirtIOCryptoReq *request)
> +{
> + VirtIOCrypto *vcrypto = request->vcrypto;
> + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> + VirtQueueElement *elem = &request->elem;
> + int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
> + struct virtio_crypto_op_data_req req;
> + int ret;
> + struct iovec *in_iov;
> + struct iovec *out_iov;
> + unsigned in_num;
> + unsigned out_num;
> + uint32_t opcode, status = VIRTIO_CRYPTO_ERR;
> + uint64_t session_id;
> + CryptoDevBackendSymOpInfo *sym_op_info = NULL;
> + Error *local_err = NULL;
> +
> + if (elem->out_num < 1 || elem->in_num < 1) {
> + virtio_error(vdev, "virtio-crypto dataq missing headers");
> + return -1;
> + }
> +
> + out_num = elem->out_num;
> + out_iov = elem->out_sg;
> + in_num = elem->in_num;
> + in_iov = elem->in_sg;
> + if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
> + != sizeof(req))) {
> + virtio_error(vdev, "virtio-crypto request outhdr too short");
> + return -1;
> + }
> + iov_discard_front(&out_iov, &out_num, sizeof(req));
> +
> + if (in_iov[in_num - 1].iov_len <
> + sizeof(struct virtio_crypto_inhdr)) {
> + virtio_error(vdev, "virtio-crypto request inhdr too short");
> + return -1;
> + }
> +
> + request->in_len = iov_size(in_iov, in_num);
> + request->in = (void *)in_iov[in_num - 1].iov_base
> + + in_iov[in_num - 1].iov_len
> + - sizeof(struct virtio_crypto_inhdr);
> + iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
> +
> + /*
> + * The length of operation result, including dest_data
> + * and digest_result if exist.
> + */
> + request->in_num = in_num;
> + request->in_iov = in_iov;
> +
> + opcode = virtio_ldl_p(vdev, &req.header.opcode);
> + session_id = virtio_ldq_p(vdev, &req.header.session_id);
> +
> + switch (opcode) {
> + case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
> + case VIRTIO_CRYPTO_CIPHER_DECRYPT:
> + ret = virtio_crypto_handle_sym_req(vcrypto,
> + &req.u.sym_req,
> + &sym_op_info,
> + out_iov, out_num);
> + /* Serious errors, need to reset virtio crypto device */
> + if (ret == -EFAULT) {
> + return -1;
> + } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
> + virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> + virtio_crypto_free_request(request);
> + } else {
> + sym_op_info->session_id = session_id;
> +
> + /* Set request's parameter */
> + request->flags = CRYPTODEV_BACKEND_ALG_SYM;
> + request->u.sym_op_info = sym_op_info;
> + ret = cryptodev_backend_sym_operation(vcrypto->cryptodev,
> + sym_op_info, queue_index, &local_err);
> + if (ret < 0) {
> + status = VIRTIO_CRYPTO_ERR;
> + if (local_err) {
> + error_report_err(local_err);
> + }
> + } else { /* ret >= 0 */
> + status = VIRTIO_CRYPTO_OK;
> + }
> + virtio_crypto_req_complete(request, status);
> + virtio_crypto_free_request(request);
Shouldn't the operation be asynchronous from the start? There is no
point in having a 1024 element virtqueue if you can only process one
element at a time.
> + }
> + break;
> + case VIRTIO_CRYPTO_HASH:
> + case VIRTIO_CRYPTO_MAC:
> + case VIRTIO_CRYPTO_AEAD_ENCRYPT:
> + case VIRTIO_CRYPTO_AEAD_DECRYPT:
> + default:
> + error_report("virtio-crypto unsupported dataq opcode: %u",
> + opcode);
> + virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> + virtio_crypto_free_request(request);
> + }
> +
> + return 0;
> +}
> +
> +static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
> +{
> + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
> + VirtIOCryptoReq *req;
> +
> + while ((req = virtio_crypto_get_request(vcrypto, vq))) {
> + if (virtio_crypto_handle_request(req) < 0) {
> + virtqueue_detach_element(req->vq, &req->elem, 0);
> + virtio_crypto_free_request(req);
> + break;
> + }
> + }
> +}
> +
> static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
> uint64_t features,
> Error **errp)
> @@ -365,7 +701,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
> vcrypto->curr_queues = 1;
>
> for (i = 0; i < vcrypto->max_queues; i++) {
> - virtio_add_queue(vdev, 1024, NULL);
> + virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq);
> }
>
> vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
> diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
> index a5e826c..1427d39 100644
> --- a/include/hw/virtio/virtio-crypto.h
> +++ b/include/hw/virtio/virtio-crypto.h
> @@ -36,6 +36,10 @@ do { \
> #define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
> OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
>
> +/* This is the last element of the write scatter-gather list */
This comment is misleading. The last element of the scatter-gather list
could contain any number of other structs too. Remember the
specification says virtio devices cannot rely on specific iovec
boundaries.
> +struct virtio_crypto_inhdr {
> + uint32_t status;
> +};
Why is this defined here? I think it should be in virtio_crypto.h and
the field type should be __virtio32.
>
> typedef struct VirtIOCryptoConf {
> CryptoDevBackend *cryptodev;
> @@ -61,8 +65,10 @@ typedef struct VirtIOCryptoReq {
> VirtQueueElement elem;
> /* flags of operation, such as type of algorithm */
> uint32_t flags;
> - /* address of in data (Device to Driver) */
> - void *idata_hva;
This field was unused. Please remove its definition and wait until this
patch to define it properly.
This way the code is easier to review because there are no unused fields
that the reviewer has to guess about when reading the code.
> + struct virtio_crypto_inhdr *in;
> + struct iovec *in_iov; /* Head address of dest iovec */
> + unsigned int in_num; /* Number of dest iovec */
> + size_t in_len;
> VirtQueue *vq;
> struct VirtIOCrypto *vcrypto;
> union {
> --
> 1.8.3.1
>
>
>
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 455 bytes --]
next prev parent reply other threads:[~2016-10-16 13:23 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-10-13 7:11 [Qemu-devel] [PATCH v7 00/12] virtio-crypto: introduce framework and device emulation Gonglei
2016-10-13 7:11 ` [Qemu-devel] [PATCH v7 01/12] cryptodev: introduce cryptodev backend interface Gonglei
2016-10-13 7:11 ` [Qemu-devel] [PATCH v7 02/12] cryptodev: add symmetric algorithm operation stuff Gonglei
2016-10-13 7:11 ` [Qemu-devel] [PATCH v7 03/12] virtio-crypto: introduce virtio_crypto.h Gonglei
2016-10-13 7:11 ` [Qemu-devel] [PATCH v7 04/12] cryptodev: introduce a new cryptodev backend Gonglei
2016-10-13 7:11 ` [Qemu-devel] [PATCH v7 05/12] virtio-crypto: add virtio crypto device emulation Gonglei
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 06/12] virtio-crypto-pci: add virtio crypto pci support Gonglei
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 07/12] virtio-crypto: set capacity of algorithms supported Gonglei
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 08/12] virtio-crypto: add control queue handler Gonglei
2016-10-16 13:02 ` Stefan Hajnoczi
2016-10-17 5:58 ` Gonglei (Arei)
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add data queue processing handler Gonglei
2016-10-16 13:22 ` Stefan Hajnoczi [this message]
2016-10-17 6:29 ` Gonglei (Arei)
2016-10-18 10:08 ` [Qemu-devel] [virtio-dev] " Stefan Hajnoczi
2016-10-19 3:43 ` Gonglei (Arei)
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 10/12] cryptodev: introduce an unified wrapper for crypto operation Gonglei
2016-10-16 13:26 ` Stefan Hajnoczi
2016-10-17 6:32 ` Gonglei (Arei)
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 11/12] virtio-crypto: add myself as virtio-crypto and cryptodev backends maintainer Gonglei
2016-10-13 7:12 ` [Qemu-devel] [PATCH v7 12/12] virtio-crypto: perfect algorithms chainning support Gonglei
2016-10-16 13:30 ` Stefan Hajnoczi
2016-10-17 6:33 ` Gonglei (Arei)
2016-10-13 8:11 ` [Qemu-devel] [PATCH v7 00/12] virtio-crypto: introduce framework and device emulation no-reply
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20161016132256.GB13294@stefanha-x1.localdomain \
--to=stefanha@gmail.com \
--cc=agraf@suse.de \
--cc=arei.gonglei@hotmail.com \
--cc=arei.gonglei@huawei.com \
--cc=claudio.fontana@huawei.com \
--cc=hanweidong@huawei.com \
--cc=jianjay.zhou@huawei.com \
--cc=luonengjun@huawei.com \
--cc=mike.caraman@nxp.com \
--cc=mst@redhat.com \
--cc=nmorey@kalray.eu \
--cc=pbonzini@redhat.com \
--cc=peter.huangpeng@huawei.com \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vincent.jardin@6wind.com \
--cc=virtio-dev@lists.oasis-open.org \
--cc=weidong.huang@huawei.com \
--cc=wu.wubin@huawei.com \
--cc=xin.zeng@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).