From: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
To: qemu-devel@nongnu.org, peter.maydell@linaro.org
Cc: yuval.shaia@oracle.com, marcel.apfelbaum@gmail.com, kamalheib1@gmail.com
Subject: [Qemu-devel]  [PATCH PULL 4/4] hw/pvrdma: Add support for SRQ
Date: Sat,  4 May 2019 16:45:29 +0300	[thread overview]
Message-ID: <20190504134529.4755-5-marcel.apfelbaum@gmail.com> (raw)
In-Reply-To: <20190504134529.4755-1-marcel.apfelbaum@gmail.com>

From: Kamal Heib <kamalheib1@gmail.com>

Implement the pvrdma device commands for supporting SRQ: create, query,
modify and destroy an SRQ, wire up the SRQ doorbell (UAR) path so guest
receive WQEs are posted to the backend SRQ, and report the SRQ
capabilities (max_srq, max_srq_wr, max_srq_sge) to the guest.
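
For context, and not part of the patch itself: the guest exercises these
handlers through the standard verbs SRQ API. A minimal sketch, assuming a
libibverbs environment; pd, mr, buf and len stand for pre-existing
resources and are hypothetical here:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Illustrative only -- plain libibverbs SRQ usage, not QEMU code. */
    struct ibv_srq_init_attr init = {
        .attr = { .max_wr = 64, .max_sge = 1, .srq_limit = 16 },
    };
    /* The guest pvrdma driver turns this into PVRDMA_CMD_CREATE_SRQ. */
    struct ibv_srq *srq = ibv_create_srq(pd, &init);

    struct ibv_sge sge = {
        .addr = (uintptr_t)buf, .length = len, .lkey = mr->lkey,
    };
    struct ibv_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
    struct ibv_recv_wr *bad_wr;
    /* Rings the SRQ doorbell (PVRDMA_UAR_SRQ_OFFSET), which ends up in
     * pvrdma_srq_recv() below. */
    ibv_post_srq_recv(srq, &wr, &bad_wr);

    /* Re-arms the limit event; only IBV_SRQ_LIMIT is accepted by
     * modify_srq() in this patch. */
    struct ibv_srq_attr mod = { .srq_limit = 8 };
    ibv_modify_srq(srq, &mod, IBV_SRQ_LIMIT);

    ibv_destroy_srq(srq);   /* -> PVRDMA_CMD_DESTROY_SRQ */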

Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Message-Id: <20190403113343.26384-5-kamalheib1@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
---
 hw/rdma/vmw/pvrdma_cmd.c    | 147 ++++++++++++++++++++++++++++++++++++
 hw/rdma/vmw/pvrdma_main.c   |  16 ++++
 hw/rdma/vmw/pvrdma_qp_ops.c |  46 ++++++++++-
 hw/rdma/vmw/pvrdma_qp_ops.h |   1 +
 4 files changed, 209 insertions(+), 1 deletion(-)

diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
index b931bb6dc9..8d70c0d23d 100644
--- a/hw/rdma/vmw/pvrdma_cmd.c
+++ b/hw/rdma/vmw/pvrdma_cmd.c
@@ -609,6 +609,149 @@ static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
     return 0;
 }
 
+static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
+                           uint64_t pdir_dma, uint32_t max_wr,
+                           uint32_t max_sge, uint32_t nchunks)
+{
+    uint64_t *dir = NULL, *tbl = NULL;
+    PvrdmaRing *r;
+    int rc = -EINVAL;
+    char ring_name[MAX_RING_NAME_SZ];
+    uint32_t wqe_sz;
+
+    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
+        rdma_error_report("Got invalid page count for SRQ ring: %d",
+                          nchunks);
+        return rc;
+    }
+
+    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
+    if (!dir) {
+        rdma_error_report("Failed to map to SRQ page directory");
+        goto out;
+    }
+
+    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+    if (!tbl) {
+        rdma_error_report("Failed to map to SRQ page table");
+        goto out;
+    }
+
+    r = g_malloc(sizeof(*r));
+    *ring = r;
+
+    r->ring_state = (struct pvrdma_ring *)
+            rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+    if (!r->ring_state) {
+        rdma_error_report("Failed to map tp SRQ ring state");
+        goto out_free_ring_mem;
+    }
+
+    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
+                      sizeof(struct pvrdma_sge) * max_sge - 1);
+    sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
+    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
+                          wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
+    if (rc) {
+        goto out_unmap_ring_state;
+    }
+
+    goto out;
+
+out_unmap_ring_state:
+    rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);
+
+out_free_ring_mem:
+    g_free(r);
+
+out:
+    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+    return rc;
+}
+
+static void destroy_srq_ring(PvrdmaRing *ring)
+{
+    pvrdma_ring_free(ring);
+    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
+    g_free(ring);
+}
+
+static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+                      union pvrdma_cmd_resp *rsp)
+{
+    struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
+    struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
+    PvrdmaRing *ring = NULL;
+    int rc;
+
+    memset(resp, 0, sizeof(*resp));
+
+    rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
+                         cmd->attrs.max_wr, cmd->attrs.max_sge,
+                         cmd->nchunks);
+    if (rc) {
+        return rc;
+    }
+
+    rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
+                           cmd->attrs.max_wr, cmd->attrs.max_sge,
+                           cmd->attrs.srq_limit, &resp->srqn, ring);
+    if (rc) {
+        destroy_srq_ring(ring);
+        return rc;
+    }
+
+    return 0;
+}
+
+static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+                     union pvrdma_cmd_resp *rsp)
+{
+    struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
+    struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;
+
+    memset(resp, 0, sizeof(*resp));
+
+    return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
+                             (struct ibv_srq_attr *)&resp->attrs);
+}
+
+static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+                      union pvrdma_cmd_resp *rsp)
+{
+    struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;
+
+    /* Only support SRQ limit */
+    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
+        (cmd->attr_mask & IBV_SRQ_MAX_WR))
+            return -EINVAL;
+
+    return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
+                              (struct ibv_srq_attr *)&cmd->attrs,
+                              cmd->attr_mask);
+}
+
+static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+                       union pvrdma_cmd_resp *rsp)
+{
+    struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
+    RdmaRmSRQ *srq;
+    PvrdmaRing *ring;
+
+    srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
+    if (!srq) {
+        return -EINVAL;
+    }
+
+    ring = (PvrdmaRing *)srq->opaque;
+    destroy_srq_ring(ring);
+    rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);
+
+    return 0;
+}
+
 struct cmd_handler {
     uint32_t cmd;
     uint32_t ack;
@@ -634,6 +777,10 @@ static struct cmd_handler cmd_handlers[] = {
     {PVRDMA_CMD_DESTROY_UC,   PVRDMA_CMD_DESTROY_UC_RESP_NOOP,   destroy_uc},
     {PVRDMA_CMD_CREATE_BIND,  PVRDMA_CMD_CREATE_BIND_RESP_NOOP,  create_bind},
     {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
+    {PVRDMA_CMD_CREATE_SRQ,   PVRDMA_CMD_CREATE_SRQ_RESP,        create_srq},
+    {PVRDMA_CMD_QUERY_SRQ,    PVRDMA_CMD_QUERY_SRQ_RESP,         query_srq},
+    {PVRDMA_CMD_MODIFY_SRQ,   PVRDMA_CMD_MODIFY_SRQ_RESP,        modify_srq},
+    {PVRDMA_CMD_DESTROY_SRQ,  PVRDMA_CMD_DESTROY_SRQ_RESP,       destroy_srq},
 };
 
 int pvrdma_exec_cmd(PVRDMADev *dev)
diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
index 0b46561bad..769f7990f8 100644
--- a/hw/rdma/vmw/pvrdma_main.c
+++ b/hw/rdma/vmw/pvrdma_main.c
@@ -53,6 +53,7 @@ static Property pvrdma_dev_properties[] = {
     DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev,
                       dev_attr.max_qp_init_rd_atom, MAX_QP_INIT_RD_ATOM),
     DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev, dev_attr.max_ah, MAX_AH),
+    DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev, dev_attr.max_srq, MAX_SRQ),
     DEFINE_PROP_CHR("mad-chardev", PVRDMADev, mad_chr),
     DEFINE_PROP_END_OF_LIST(),
 };
@@ -261,6 +262,9 @@ static void init_dsr_dev_caps(PVRDMADev *dev)
     dsr->caps.max_mr = dev->dev_attr.max_mr;
     dsr->caps.max_pd = dev->dev_attr.max_pd;
     dsr->caps.max_ah = dev->dev_attr.max_ah;
+    dsr->caps.max_srq = dev->dev_attr.max_srq;
+    dsr->caps.max_srq_wr = dev->dev_attr.max_srq_wr;
+    dsr->caps.max_srq_sge = dev->dev_attr.max_srq_sge;
     dsr->caps.gid_tbl_len = MAX_GIDS;
     dsr->caps.sys_image_guid = 0;
     dsr->caps.node_guid = dev->node_guid;
@@ -485,6 +489,13 @@ static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val,
             pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK);
         }
         break;
+    case PVRDMA_UAR_SRQ_OFFSET:
+        if (val & PVRDMA_UAR_SRQ_RECV) {
+            trace_pvrdma_uar_write(addr, val, "QP", "SRQ",
+                                   val & PVRDMA_UAR_HANDLE_MASK, 0);
+            pvrdma_srq_recv(dev, val & PVRDMA_UAR_HANDLE_MASK);
+        }
+        break;
     default:
         rdma_error_report("Unsupported command, addr=0x%"PRIx64", val=0x%"PRIx64,
                           addr, val);
@@ -554,6 +565,11 @@ static void init_dev_caps(PVRDMADev *dev)
 
     dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) -
                             TARGET_PAGE_SIZE; /* First page is ring state */
+
+    dev->dev_attr.max_srq_wr = pg_tbl_bytes /
+                                ((sizeof(struct pvrdma_rq_wqe_hdr) +
+                                sizeof(struct pvrdma_sge)) *
+                                dev->dev_attr.max_sge) - TARGET_PAGE_SIZE;
 }
 
 static int pvrdma_check_ram_shared(Object *obj, void *opaque)
diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c
index 5b9786efbe..bd6db858de 100644
--- a/hw/rdma/vmw/pvrdma_qp_ops.c
+++ b/hw/rdma/vmw/pvrdma_qp_ops.c
@@ -70,7 +70,7 @@ static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle,
 
     memset(cqe1, 0, sizeof(*cqe1));
     cqe1->wr_id = cqe->wr_id;
-    cqe1->qp = cqe->qp;
+    cqe1->qp = cqe->qp ? cqe->qp : wc->qp_num;
     cqe1->opcode = cqe->opcode;
     cqe1->status = wc->status;
     cqe1->byte_len = wc->byte_len;
@@ -241,6 +241,50 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle)
     }
 }
 
+void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle)
+{
+    RdmaRmSRQ *srq;
+    PvrdmaRqWqe *wqe;
+    PvrdmaRing *ring;
+
+    srq = rdma_rm_get_srq(&dev->rdma_dev_res, srq_handle);
+    if (unlikely(!srq)) {
+        return;
+    }
+
+    ring = (PvrdmaRing *)srq->opaque;
+
+    wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+    while (wqe) {
+        CompHandlerCtx *comp_ctx;
+
+        /* Prepare CQE */
+        comp_ctx = g_malloc(sizeof(CompHandlerCtx));
+        comp_ctx->dev = dev;
+        comp_ctx->cq_handle = srq->recv_cq_handle;
+        comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
+        comp_ctx->cqe.qp = 0;
+        comp_ctx->cqe.opcode = IBV_WC_RECV;
+
+        if (wqe->hdr.num_sge > dev->dev_attr.max_sge) {
+            rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge,
+                              dev->dev_attr.max_sge);
+            complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx);
+            continue;
+        }
+
+        rdma_backend_post_srq_recv(&dev->backend_dev, &srq->backend_srq,
+                                   (struct ibv_sge *)&wqe->sge[0],
+                                   wqe->hdr.num_sge,
+                                   comp_ctx);
+
+        pvrdma_ring_read_inc(ring);
+
+        wqe = pvrdma_ring_next_elem_read(ring);
+    }
+
+}
+
 void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle)
 {
     RdmaRmCQ *cq;
diff --git a/hw/rdma/vmw/pvrdma_qp_ops.h b/hw/rdma/vmw/pvrdma_qp_ops.h
index 31cb48ba29..82e720a76f 100644
--- a/hw/rdma/vmw/pvrdma_qp_ops.h
+++ b/hw/rdma/vmw/pvrdma_qp_ops.h
@@ -22,6 +22,7 @@ int pvrdma_qp_ops_init(void);
 void pvrdma_qp_ops_fini(void);
 void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle);
 void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle);
+void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle);
 void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle);
 
 #endif
-- 
2.17.1

Thread overview: 11+ messages
2019-05-04 13:45 [Qemu-devel] [PATCH PULL 0/4] RDMA queue Marcel Apfelbaum
2019-05-04 13:45 ` [Qemu-devel] [PATCH PULL 1/4] hw/rdma: Add SRQ support to backend layer Marcel Apfelbaum
2019-05-04 13:45 ` [Qemu-devel] [PATCH PULL 2/4] hw/rdma: Add support for managing SRQ resource Marcel Apfelbaum
2019-05-04 13:45 ` [Qemu-devel] [PATCH PULL 3/4] hw/rdma: Modify create/destroy QP to support SRQ Marcel Apfelbaum
2019-05-04 13:45 ` [Qemu-devel] [PATCH PULL 4/4] hw/pvrdma: Add support for SRQ Marcel Apfelbaum [this message]
2019-05-07 16:15 ` [Qemu-devel] [PATCH PULL 0/4] RDMA queue Peter Maydell
