From: elena.ufimtseva@oracle.com
To: qemu-devel@nongnu.org
Cc: elena.ufimtseva@oracle.com, fam@euphon.net,
	swapnil.ingle@nutanix.com, john.g.johnson@oracle.com,
	kraxel@redhat.com, jag.raman@oracle.com, quintela@redhat.com,
	mst@redhat.com, armbru@redhat.com, kanth.ghatraju@oracle.com,
	felipe@nutanix.com, thuth@redhat.com, ehabkost@redhat.com,
	konrad.wilk@oracle.com, dgilbert@redhat.com, stefanha@redhat.com,
	thanos.makatos@nutanix.com, rth@twiddle.net, kwolf@redhat.com,
	berrange@redhat.com, mreitz@redhat.com,
	ross.lagerwall@citrix.com, marcandre.lureau@gmail.com,
	pbonzini@redhat.com
Subject: [PATCH v9 14/20] multi-process: PCI BAR read/write handling for proxy & remote endpoints
Date: Thu, 27 Aug 2020 11:12:25 -0700
Message-ID: <20200827181231.22778-15-elena.ufimtseva@oracle.com>
In-Reply-To: <20200827181231.22778-1-elena.ufimtseva@oracle.com>

From: Jagannathan Raman <jag.raman@oracle.com>

The proxy device object implements handlers for PCI BAR writes and
reads. The handlers use BAR_WRITE/BAR_READ messages to send the BAR
address, access size, and the value to be written (or to be read) to
the remote process. The remote process implements the corresponding
handlers for the BAR_WRITE/BAR_READ messages and performs the access
in its memory or I/O address space.

Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
---
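
For illustration only, not part of the patch itself: a minimal,
standalone C sketch of the request/reply exchange described above.
The WireMsg structure, the socketpair() transport, and all values
below are invented for this sketch; they only mirror the shape of the
BarAccessMsg/MPQemuMsg exchange that this patch adds on top of the
mpqemu link.

    /* Compile with: cc -o bar_sketch bar_sketch.c */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    enum { BAR_WRITE = 1, BAR_READ, RET_MSG };

    typedef struct {
        int      cmd;     /* BAR_WRITE, BAR_READ or RET_MSG              */
        uint64_t addr;    /* BAR address to access                       */
        uint64_t val;     /* value to write, or value returned by a read */
        unsigned size;    /* access size in bytes (1, 2, 4 or 8)         */
        bool     memory;  /* true: memory space, false: port I/O space   */
    } WireMsg;

    int main(void)
    {
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
            return 1;
        }

        /* "Proxy" side: ask the remote end to read 4 bytes at address 0x10. */
        WireMsg req = { .cmd = BAR_READ, .addr = 0x10, .size = 4, .memory = true };
        if (write(sv[0], &req, sizeof(req)) != (ssize_t)sizeof(req)) {
            return 1;
        }

        /* "Remote" side: receive the request, read a fake register, reply. */
        WireMsg in, reply = { .cmd = RET_MSG };
        if (read(sv[1], &in, sizeof(in)) != (ssize_t)sizeof(in)) {
            return 1;
        }
        reply.val = (in.cmd == BAR_READ) ? 0xdeadbeef : 0;
        if (write(sv[1], &reply, sizeof(reply)) != (ssize_t)sizeof(reply)) {
            return 1;
        }

        /* Proxy side: the RET_MSG reply carries the value for the guest read. */
        WireMsg resp;
        if (read(sv[0], &resp, sizeof(resp)) != (ssize_t)sizeof(resp)) {
            return 1;
        }
        printf("BAR read at 0x%" PRIx64 " -> 0x%" PRIx64 "\n", req.addr, resp.val);

        close(sv[0]);
        close(sv[1]);
        return 0;
    }

The code in the patch follows the same pattern: the proxy packs a
BarAccessMsg into an MPQemuMsg and waits for a RET_MSG reply, while
the remote side turns the request into an address_space_rw() call and
returns the result (or UINT64_MAX on error) in the reply's u64 field.
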
 hw/i386/remote-msg.c     | 94 ++++++++++++++++++++++++++++++++++++++++
 hw/pci/proxy.c           | 64 ++++++++++++++++++++++++++-
 include/hw/pci/proxy.h   | 16 ++++++-
 include/io/mpqemu-link.h | 10 +++++
 io/mpqemu-link.c         |  6 +++
 5 files changed, 186 insertions(+), 4 deletions(-)

diff --git a/hw/i386/remote-msg.c b/hw/i386/remote-msg.c
index 322c1888a3..841c681d76 100644
--- a/hw/i386/remote-msg.c
+++ b/hw/i386/remote-msg.c
@@ -16,11 +16,14 @@
 #include "qapi/error.h"
 #include "sysemu/runstate.h"
 #include "hw/pci/pci.h"
+#include "exec/memattrs.h"
 
 static void process_config_write(QIOChannel *ioc, PCIDevice *dev,
                                  MPQemuMsg *msg);
 static void process_config_read(QIOChannel *ioc, PCIDevice *dev,
                                 MPQemuMsg *msg);
+static void process_bar_write(QIOChannel *ioc, MPQemuMsg *msg, Error **errp);
+static void process_bar_read(QIOChannel *ioc, MPQemuMsg *msg, Error **errp);
 
 gboolean mpqemu_process_msg(QIOChannel *ioc, GIOCondition cond,
                             gpointer opaque)
@@ -62,6 +65,12 @@ gboolean mpqemu_process_msg(QIOChannel *ioc, GIOCondition cond,
     case PCI_CONFIG_READ:
         process_config_read(ioc, pci_dev, &msg);
         break;
+    case BAR_WRITE:
+        process_bar_write(ioc, &msg, &local_err);
+        break;
+    case BAR_READ:
+        process_bar_read(ioc, &msg, &local_err);
+        break;
     default:
         error_setg(&local_err,
                    "Unknown command (%d) received for device %s (pid=%d)",
@@ -134,3 +143,88 @@ static void process_config_read(QIOChannel *ioc, PCIDevice *dev,
                      getpid());
     }
 }
+
+static void process_bar_write(QIOChannel *ioc, MPQemuMsg *msg, Error **errp)
+{
+    BarAccessMsg *bar_access = &msg->data.bar_access;
+    AddressSpace *as =
+        bar_access->memory ? &address_space_memory : &address_space_io;
+    MPQemuMsg ret = { 0 };
+    MPQemuRequest req = { 0 };
+    MemTxResult res;
+    uint64_t val;
+    Error *local_err = NULL;
+
+    if (!is_power_of_2(bar_access->size) ||
+       (bar_access->size > sizeof(uint64_t))) {
+        ret.data.u64 = UINT64_MAX;
+        goto fail;
+    }
+
+    val = cpu_to_le64(bar_access->val);
+
+    res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED,
+                           (void *)&val, bar_access->size, true);
+
+    if (res != MEMTX_OK) {
+        error_setg(errp, "Could not perform address space write operation,"
+                   " inaccessible address: %lx in pid %d.",
+                   bar_access->addr, getpid());
+        ret.data.u64 = -1;
+    }
+
+fail:
+    ret.cmd = RET_MSG;
+    ret.size = sizeof(ret.data.u64);
+
+    req.ioc = ioc;
+    req.msg = &ret;
+
+    mpqemu_msg_send_in_co(&req, ioc, (errp && *errp) ? NULL : &local_err);
+    if (local_err) {
+        error_setg(errp, "Error while sending message to proxy "
+                   "in remote process pid=%d", getpid());
+    }
+}
+
+static void process_bar_read(QIOChannel *ioc, MPQemuMsg *msg, Error **errp)
+{
+    BarAccessMsg *bar_access = &msg->data.bar_access;
+    MPQemuMsg ret = { 0 };
+    MPQemuRequest req = { 0 };
+    AddressSpace *as;
+    MemTxResult res;
+    uint64_t val = 0;
+    Error *local_err = NULL;
+
+    as = bar_access->memory ? &address_space_memory : &address_space_io;
+
+    if (!is_power_of_2(bar_access->size) ||
+       (bar_access->size > sizeof(uint64_t))) {
+        val = UINT64_MAX;
+        goto fail;
+    }
+
+    res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED,
+                           (void *)&val, bar_access->size, false);
+
+    if (res != MEMTX_OK) {
+        error_setg(errp, "Could not perform address space read operation,"
+                   " inaccessible address: %lx in pid %d.",
+                   bar_access->addr, getpid());
+        val = UINT64_MAX;
+    }
+
+fail:
+    ret.cmd = RET_MSG;
+    ret.data.u64 = le64_to_cpu(val);
+    ret.size = sizeof(ret.data.u64);
+
+    req.ioc = ioc;
+    req.msg = &ret;
+    mpqemu_msg_send_in_co(&req, ioc, (errp && *errp) ? NULL : &local_err);
+    if (local_err) {
+        error_setg(errp, "Error while sending message to proxy "
+                   "in remote process pid=%d", getpid());
+    }
+}
diff --git a/hw/pci/proxy.c b/hw/pci/proxy.c
index 23aab44d8e..d332c63bf3 100644
--- a/hw/pci/proxy.c
+++ b/hw/pci/proxy.c
@@ -61,7 +61,7 @@ static int config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
                           int l, unsigned int op)
 {
     MPQemuMsg msg = { 0 };
-    long ret = -EINVAL;
+    uint64_t ret = -EINVAL;
     Error *local_err = NULL;
 
     msg.cmd = op;
@@ -72,7 +72,7 @@ static int config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
 
     ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
     if (local_err) {
-        error_report("Failed to exchange PCI_CONFIG message with remote");
+        error_report_err(local_err);
     }
     if (op == PCI_CONFIG_READ) {
         *val = (uint32_t)ret;
@@ -132,3 +132,63 @@ static void pci_proxy_dev_register_types(void)
 }
 
 type_init(pci_proxy_dev_register_types)
+
+static void send_bar_access_msg(PCIProxyDev *pdev, MemoryRegion *mr,
+                                bool write, hwaddr addr, uint64_t *val,
+                                unsigned size, bool memory)
+{
+    MPQemuMsg msg = { 0 };
+    long ret = -EINVAL;
+    Error *local_err = NULL;
+
+    msg.size = sizeof(BarAccessMsg);
+    msg.data.bar_access.addr = mr->addr + addr;
+    msg.data.bar_access.size = size;
+    msg.data.bar_access.memory = memory;
+
+    if (write) {
+        msg.cmd = BAR_WRITE;
+        msg.data.bar_access.val = *val;
+    } else {
+        msg.cmd = BAR_READ;
+    }
+
+    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+    }
+
+    if (!write) {
+        *val = ret;
+    }
+}
+
+static void proxy_bar_write(void *opaque, hwaddr addr, uint64_t val,
+                            unsigned size)
+{
+    ProxyMemoryRegion *pmr = opaque;
+
+    send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size,
+                        pmr->memory);
+}
+
+static uint64_t proxy_bar_read(void *opaque, hwaddr addr, unsigned size)
+{
+    ProxyMemoryRegion *pmr = opaque;
+    uint64_t val;
+
+    send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size,
+                        pmr->memory);
+
+    return val;
+}
+
+const MemoryRegionOps proxy_mr_ops = {
+    .read = proxy_bar_read,
+    .write = proxy_bar_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+};
diff --git a/include/hw/pci/proxy.h b/include/hw/pci/proxy.h
index 4ae7becf34..e5e18f0b15 100644
--- a/include/hw/pci/proxy.h
+++ b/include/hw/pci/proxy.h
@@ -17,7 +17,17 @@
 #define PCI_PROXY_DEV(obj) \
             OBJECT_CHECK(PCIProxyDev, (obj), TYPE_PCI_PROXY_DEV)
 
-typedef struct PCIProxyDev {
+typedef struct PCIProxyDev PCIProxyDev;
+
+typedef struct ProxyMemoryRegion {
+    PCIProxyDev *dev;
+    MemoryRegion mr;
+    bool memory;
+    bool present;
+    uint8_t type;
+} ProxyMemoryRegion;
+
+struct PCIProxyDev {
     PCIDevice parent_dev;
     char *fd;
 
@@ -29,6 +39,8 @@ typedef struct PCIProxyDev {
      */
     QemuMutex io_mutex;
     QIOChannel *ioc;
-} PCIProxyDev;
+
+    ProxyMemoryRegion region[PCI_NUM_REGIONS];
+};
 
 #endif /* PROXY_H */
diff --git a/include/io/mpqemu-link.h b/include/io/mpqemu-link.h
index a0ffedcdb1..967ae24e3a 100644
--- a/include/io/mpqemu-link.h
+++ b/include/io/mpqemu-link.h
@@ -36,6 +36,8 @@ typedef enum {
     RET_MSG,
     PCI_CONFIG_WRITE,
     PCI_CONFIG_READ,
+    BAR_WRITE,
+    BAR_READ,
     MAX = INT_MAX,
 } MPQemuCmd;
 
@@ -51,6 +53,13 @@ typedef struct {
     int l;
 } ConfDataMsg;
 
+typedef struct {
+    hwaddr addr;
+    uint64_t val;
+    unsigned size;
+    bool memory;
+} BarAccessMsg;
+
 /**
  * MPQemuMsg:
  * @cmd: The remote command
@@ -70,6 +79,7 @@ typedef struct {
         uint64_t u64;
         ConfDataMsg conf_data;
         SyncSysmemMsg sync_sysmem;
+        BarAccessMsg bar_access;
     } data;
 
     int fds[REMOTE_MAX_FDS];
diff --git a/io/mpqemu-link.c b/io/mpqemu-link.c
index af9f536660..9233df215e 100644
--- a/io/mpqemu-link.c
+++ b/io/mpqemu-link.c
@@ -328,6 +328,12 @@ bool mpqemu_msg_valid(MPQemuMsg *msg)
             return false;
         }
         break;
+    case BAR_WRITE:
+    case BAR_READ:
+        if ((msg->size != sizeof(BarAccessMsg)) || (msg->num_fds != 0)) {
+            return false;
+        }
+        break;
     default:
         break;
     }
-- 
2.25.GIT



