From: Vikram Garhwal <vikram.garhwal@amd.com>
To: Stefano Stabellini <sstabellini@kernel.org>
Cc: qemu-devel@nongnu.org, jgross@suse.com,
	"Anthony Perard" <anthony.perard@citrix.com>,
	"Paul Durrant" <paul@xen.org>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Peter Xu" <peterx@redhat.com>,
	"David Hildenbrand" <david@redhat.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"open list:X86 Xen CPUs" <xen-devel@lists.xenproject.org>
Subject: Re: [QEMU][PATCHv2 7/8] xen: add map and unmap callbacks for grant region
Date: Wed, 25 Oct 2023 21:35:24 -0700
Message-ID: <ZTnsjCo2kPyWk_ZT@amd.com>
In-Reply-To: <alpine.DEB.2.22.394.2310251832100.271731@ubuntu-linux-20-04-desktop>

On Wed, Oct 25, 2023 at 06:32:26PM -0700, Stefano Stabellini wrote:
> On Wed, 25 Oct 2023, Vikram Garhwal wrote:
> > From: Juergen Gross <jgross@suse.com>
> > 
> > Add the callbacks for mapping/unmapping guest memory via grants to the
> > special grant memory region.
> > 
> > Signed-off-by: Juergen Gross <jgross@suse.com>
> > Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
> > ---
> >  hw/xen/xen-mapcache.c | 175 +++++++++++++++++++++++++++++++++++++++++-
> >  system/physmem.c      |  11 ++-
> >  2 files changed, 181 insertions(+), 5 deletions(-)
> > 
> > diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
> > index 8a61c7dde6..feb4a3b886 100644
> > --- a/hw/xen/xen-mapcache.c
> > +++ b/hw/xen/xen-mapcache.c
> > @@ -9,6 +9,8 @@
> >   */
> >  
> >  #include "qemu/osdep.h"
> > +#include "qemu/queue.h"
> > +#include "qemu/thread.h"
> >  #include "qemu/units.h"
> >  #include "qemu/error-report.h"
> >  
> > @@ -23,6 +25,8 @@
> >  #include "sysemu/xen-mapcache.h"
> >  #include "trace.h"
> >  
> > +#include <xenevtchn.h>
> > +#include <xengnttab.h>
> >  
> >  //#define MAPCACHE_DEBUG
> >  
> > @@ -385,7 +389,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
> >      return p;
> >  }
> >  
> > -ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> > +static ram_addr_t xen_ram_addr_from_mapcache_try(void *ptr)
> >  {
> >      MapCacheEntry *entry = NULL;
> >      MapCacheRev *reventry;
> > @@ -594,10 +598,178 @@ uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
> >      return p;
> >  }
> >  
> > +struct XENMappedGrantRegion {
> > +    void *addr;
> > +    unsigned int pages;
> > +    unsigned int refs;
> > +    unsigned int prot;
> > +    uint32_t idx;
> > +    QLIST_ENTRY(XENMappedGrantRegion) list;
> > +};
> > +
> > +static xengnttab_handle *xen_region_gnttabdev;
> > +static QLIST_HEAD(GrantRegionList, XENMappedGrantRegion) xen_grant_mappings =
> > +    QLIST_HEAD_INITIALIZER(xen_grant_mappings);
> > +static QemuMutex xen_map_mutex;
> > +
> > +static void *xen_map_grant_dyn(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
> > +                               bool is_write, MemTxAttrs attrs)
> > +{
> > +    unsigned int page_off = addr & (XC_PAGE_SIZE - 1);
> > +    unsigned int i;
> > +    unsigned int total_grants = 0;
> > +    unsigned int nrefs = (page_off + *plen + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
> > +    uint32_t ref = (addr - XEN_GRANT_ADDR_OFF) >> XC_PAGE_SHIFT;
> > +    uint32_t *refs = NULL;
> > +    unsigned int prot = PROT_READ;
> > +    struct XENMappedGrantRegion *mgr = NULL;
> > +
> > +    if (is_write) {
> > +        prot |= PROT_WRITE;
> > +    }
> > +
> > +    qemu_mutex_lock(&xen_map_mutex);
> > +
> > +    QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
> > +        if (mgr->idx == ref &&
> > +            mgr->pages == nrefs &&
> > +            (mgr->prot & prot) == prot) {
> > +            break;
> > +        }
> > +
> > +        total_grants += mgr->pages;
> > +    }
> > +
> > +    if (!mgr) {
> > +        if (nrefs + total_grants >= XEN_MAX_VIRTIO_GRANTS) {
> > +            return NULL;
> 
> missing qemu_mutex_unlock
Oops, thanks for catching this! Will fix it in v3.
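For reference, one minimal way to shape the fix (a sketch, not the final
v3 code) is to drop the lock before the early return:

    if (!mgr) {
        if (nrefs + total_grants >= XEN_MAX_VIRTIO_GRANTS) {
            /* Unlock before bailing out, like the common exit path below. */
            qemu_mutex_unlock(&xen_map_mutex);
            return NULL;
        }
        ...

Alternatively, QEMU_LOCK_GUARD(&xen_map_mutex) from "qemu/lockable.h"
would release the mutex automatically on every return path.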
> 
> 
> > +        }
> > +
> > +        mgr = g_new(struct XENMappedGrantRegion, 1);
> > +
> > +        if (nrefs == 1) {
> > +            refs = &ref;
> > +        } else {
> > +            refs = g_new(uint32_t, nrefs);
> > +            for (i = 0; i < nrefs; i++) {
> > +                refs[i] = ref + i;
> > +            }
> > +        }
> > +        mgr->addr = xengnttab_map_domain_grant_refs(xen_region_gnttabdev, nrefs,
> > +                                                    xen_domid, refs, prot);
> > +        if (mgr->addr) {
> > +            mgr->pages = nrefs;
> > +            mgr->refs = 1;
> > +            mgr->prot = prot;
> > +            mgr->idx = ref;
> > +
> > +            QLIST_INSERT_HEAD(&xen_grant_mappings, mgr, list);
> > +        } else {
> > +            g_free(mgr);
> > +            mgr = NULL;
> > +        }
> > +    } else {
> > +        mgr->refs++;
> > +    }
> > +
> > +    qemu_mutex_unlock(&xen_map_mutex);
> > +
> > +    if (nrefs > 1) {
> > +        g_free(refs);
> > +    }
> > +
> > +    return mgr ? mgr->addr + page_off : NULL;
> > +}
> > +
> > +static void xen_unmap_grant_dyn(MemoryRegion *mr, void *buffer, ram_addr_t addr,
> > +                                hwaddr len, bool is_write, hwaddr access_len)
> > +{
> > +    unsigned int page_off = (unsigned long)buffer & (XC_PAGE_SIZE - 1);
> > +    unsigned int nrefs = (page_off + len + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
> > +    unsigned int prot = PROT_READ;
> > +    struct XENMappedGrantRegion *mgr = NULL;
> > +
> > +    if (is_write) {
> > +        prot |= PROT_WRITE;
> > +    }
> > +
> > +    qemu_mutex_lock(&xen_map_mutex);
> > +
> > +    QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
> > +        if (mgr->addr == buffer - page_off &&
> > +            mgr->pages == nrefs &&
> > +            (mgr->prot & prot) == prot) {
> > +            break;
> > +        }
> > +    }
> > +    if (mgr) {
> > +        mgr->refs--;
> > +        if (!mgr->refs) {
> > +            xengnttab_unmap(xen_region_gnttabdev, mgr->addr, nrefs);
> > +
> > +            QLIST_REMOVE(mgr, list);
> > +            g_free(mgr);
> > +        }
> > +    } else {
> > +        error_report("xen_unmap_grant_dyn() trying to unmap unknown buffer");
> > +    }
> > +
> > +    qemu_mutex_unlock(&xen_map_mutex);
> > +}
> > +
> > +static ram_addr_t xen_ram_addr_from_grant_cache(void *ptr)
> > +{
> > +    unsigned int page_off = (unsigned long)ptr & (XC_PAGE_SIZE - 1);
> > +    struct XENMappedGrantRegion *mgr = NULL;
> > +    ram_addr_t raddr = RAM_ADDR_INVALID;
> > +
> > +    qemu_mutex_lock(&xen_map_mutex);
> > +
> > +    QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
> > +        if (mgr->addr == ptr - page_off) {
> > +            break;
> > +        }
> > +    }
> > +
> > +    if (mgr) {
> > +        raddr = (mgr->idx << XC_PAGE_SHIFT) + page_off + XEN_GRANT_ADDR_OFF;
> > +    }
> > +
> > +    qemu_mutex_unlock(&xen_map_mutex);
> > +
> > +    return raddr;
> > +}
> > +
> > +ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> > +{
> > +    ram_addr_t raddr;
> > +
> > +    raddr = xen_ram_addr_from_mapcache_try(ptr);
> > +    if (raddr == RAM_ADDR_INVALID) {
> > +        raddr = xen_ram_addr_from_grant_cache(ptr);
> > +    }
> > +
> > +    return raddr;
> > +}
> > +
> > +static const struct MemoryRegionOps xen_grant_mr_ops = {
> > +    .map = xen_map_grant_dyn,
> > +    .unmap = xen_unmap_grant_dyn,
> > +    .endianness = DEVICE_LITTLE_ENDIAN,
> > +};
> > +
> >  MemoryRegion *xen_init_grant_ram(void)
> >  {
> >      RAMBlock *block;
> >  
> > +    qemu_mutex_init(&xen_map_mutex);
> > +
> > +    xen_region_gnttabdev = xengnttab_open(NULL, 0);
> > +    if (xen_region_gnttabdev == NULL) {
> > +        fprintf(stderr, "can't open gnttab device\n");
> > +        return NULL;
> > +    }
> > +
> >      memory_region_init(&ram_grants, NULL, "xen.grants",
> >                         XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE);
> >      block = g_malloc0(sizeof(*block));
> > @@ -612,6 +784,7 @@ MemoryRegion *xen_init_grant_ram(void)
> >      ram_grants.ram_block = block;
> >      ram_grants.ram = true;
> >      ram_grants.terminates = true;
> > +    ram_grants.ops = &xen_grant_mr_ops;
> >      ram_block_add_list(block);
> >      memory_region_add_subregion(get_system_memory(), XEN_GRANT_ADDR_OFF,
> >                                  &ram_grants);
> > diff --git a/system/physmem.c b/system/physmem.c
> > index 5db1b32823..155a8c05fb 100644
> > --- a/system/physmem.c
> > +++ b/system/physmem.c
> > @@ -2233,13 +2233,16 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
> >  
> >      if (xen_enabled()) {
> >          ram_addr_t ram_addr;
> > +
> >          RCU_READ_LOCK_GUARD();
> >          ram_addr = xen_ram_addr_from_mapcache(ptr);
> > -        block = qemu_get_ram_block(ram_addr);
> > -        if (block) {
> > -            *offset = ram_addr - block->offset;
> > +        if (ram_addr != RAM_ADDR_INVALID) {
> > +            block = qemu_get_ram_block(ram_addr);
> > +            if (block) {
> > +                *offset = ram_addr - block->offset;
> > +            }
> > +            return block;
> >          }
> > -        return block;
> >      }
> >  
> >      RCU_READ_LOCK_GUARD();
> > -- 
> > 2.17.1
> > 


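For readers following the series: assuming patch 6/8 dispatches the new
MemoryRegionOps .map/.unmap callbacks from address_space_map() and
address_space_unmap(), a device model touching the grant region would
reach xen_map_grant_dyn()/xen_unmap_grant_dyn() roughly like this
(grant_addr, size and is_write are hypothetical, shown only to
illustrate the flow):

    hwaddr len = size;
    void *buf;

    /* For an address inside xen.grants, this lands in xen_map_grant_dyn(). */
    buf = address_space_map(&address_space_memory, grant_addr, &len,
                            is_write, MEMTXATTRS_UNSPECIFIED);
    if (buf) {
        /* ... access len bytes of the granted guest memory ... */

        /* This lands in xen_unmap_grant_dyn() and drops the reference. */
        address_space_unmap(&address_space_memory, buf, len, is_write, len);
    }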