* [PATCH v6 1/8] xen: mapcache: Make MCACHE_BUCKET_SHIFT runtime configurable
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
@ 2024-05-16 15:47 ` Edgar E. Iglesias
2024-05-16 15:47 ` [PATCH v6 2/8] xen: mapcache: Unmap first entries in buckets Edgar E. Iglesias
` (6 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:47 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, xen-devel
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Make MCACHE_BUCKET_SHIFT runtime configurable per cache instance.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
hw/xen/xen-mapcache.c | 54 ++++++++++++++++++++++++++-----------------
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index fa6813b1ad..bc860f4373 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -23,13 +23,10 @@
#if HOST_LONG_BITS == 32
-# define MCACHE_BUCKET_SHIFT 16
# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
#else
-# define MCACHE_BUCKET_SHIFT 20
# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
#endif
-#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
/* This is the size of the virtual address space reserve to QEMU that will not
* be use by MapCache.
@@ -65,7 +62,8 @@ typedef struct MapCache {
/* For most cases (>99.9%), the page address is the same. */
MapCacheEntry *last_entry;
unsigned long max_mcache_size;
- unsigned int mcache_bucket_shift;
+ unsigned int bucket_shift;
+ unsigned long bucket_size;
phys_offset_to_gaddr_t phys_offset_to_gaddr;
QemuMutex lock;
@@ -95,11 +93,14 @@ static inline int test_bits(int nr, int size, const unsigned long *addr)
static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
void *opaque,
+ unsigned int bucket_shift,
unsigned long max_size)
{
unsigned long size;
MapCache *mc;
+ assert(bucket_shift >= XC_PAGE_SHIFT);
+
mc = g_new0(MapCache, 1);
mc->phys_offset_to_gaddr = f;
@@ -108,12 +109,14 @@ static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
QTAILQ_INIT(&mc->locked_entries);
+ mc->bucket_shift = bucket_shift;
+ mc->bucket_size = 1UL << bucket_shift;
mc->max_mcache_size = max_size;
mc->nr_buckets =
(((mc->max_mcache_size >> XC_PAGE_SHIFT) +
- (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
- (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
+ (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
+ (bucket_shift - XC_PAGE_SHIFT));
size = mc->nr_buckets * sizeof(MapCacheEntry);
size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
@@ -126,6 +129,13 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
struct rlimit rlimit_as;
unsigned long max_mcache_size;
+ unsigned int bucket_shift;
+
+ if (HOST_LONG_BITS == 32) {
+ bucket_shift = 16;
+ } else {
+ bucket_shift = 20;
+ }
if (geteuid() == 0) {
rlimit_as.rlim_cur = RLIM_INFINITY;
@@ -146,7 +156,9 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
}
}
- mapcache = xen_map_cache_init_single(f, opaque, max_mcache_size);
+ mapcache = xen_map_cache_init_single(f, opaque,
+ bucket_shift,
+ max_mcache_size);
setrlimit(RLIMIT_AS, &rlimit_as);
}
@@ -195,7 +207,7 @@ static void xen_remap_bucket(MapCache *mc,
entry->valid_mapping = NULL;
for (i = 0; i < nb_pfn; i++) {
- pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
+ pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
}
/*
@@ -266,8 +278,8 @@ static uint8_t *xen_map_cache_unlocked(MapCache *mc,
bool dummy = false;
tryagain:
- address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
- address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
+ address_index = phys_addr >> mc->bucket_shift;
+ address_offset = phys_addr & (mc->bucket_size - 1);
trace_xen_map_cache(phys_addr);
@@ -294,14 +306,14 @@ tryagain:
return mc->last_entry->vaddr_base + address_offset;
}
- /* size is always a multiple of MCACHE_BUCKET_SIZE */
+ /* size is always a multiple of mc->bucket_size */
if (size) {
cache_size = size + address_offset;
- if (cache_size % MCACHE_BUCKET_SIZE) {
- cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
+ if (cache_size % mc->bucket_size) {
+ cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
}
} else {
- cache_size = MCACHE_BUCKET_SIZE;
+ cache_size = mc->bucket_size;
}
entry = &mc->entry[address_index % mc->nr_buckets];
@@ -422,7 +434,7 @@ static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
trace_xen_ram_addr_from_mapcache_not_in_cache(ptr);
raddr = RAM_ADDR_INVALID;
} else {
- raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
+ raddr = (reventry->paddr_index << mc->bucket_shift) +
((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
mapcache_unlock(mc);
@@ -585,8 +597,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
hwaddr address_index, address_offset;
hwaddr test_bit_size, cache_size = size;
- address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT;
- address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1);
+ address_index = old_phys_addr >> mc->bucket_shift;
+ address_offset = old_phys_addr & (mc->bucket_size - 1);
assert(size);
/* test_bit_size is always a multiple of XC_PAGE_SIZE */
@@ -595,8 +607,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
}
cache_size = size + address_offset;
- if (cache_size % MCACHE_BUCKET_SIZE) {
- cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
+ if (cache_size % mc->bucket_size) {
+ cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
}
entry = &mc->entry[address_index % mc->nr_buckets];
@@ -609,8 +621,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
return NULL;
}
- address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT;
- address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1);
+ address_index = new_phys_addr >> mc->bucket_shift;
+ address_offset = new_phys_addr & (mc->bucket_size - 1);
trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH v6 2/8] xen: mapcache: Unmap first entries in buckets
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
2024-05-16 15:47 ` [PATCH v6 1/8] xen: mapcache: Make MCACHE_BUCKET_SHIFT runtime configurable Edgar E. Iglesias
@ 2024-05-16 15:47 ` Edgar E. Iglesias
2024-05-16 15:47 ` [PATCH v6 3/8] xen: Add xen_mr_is_memory() Edgar E. Iglesias
` (5 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:47 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, xen-devel
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
When invalidating memory ranges, if we happened to hit the first
entry in a bucket, we never unmapped it. This was harmless
for foreign mappings, but now that we're looking to reuse the
mapcache for transient grant mappings, we must unmap entries
when they are invalidated.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
hw/xen/xen-mapcache.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index bc860f4373..ec95445696 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -491,18 +491,23 @@ static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
return;
}
entry->lock--;
- if (entry->lock > 0 || pentry == NULL) {
+ if (entry->lock > 0) {
return;
}
- pentry->next = entry->next;
ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
if (munmap(entry->vaddr_base, entry->size) != 0) {
perror("unmap fails");
exit(-1);
}
+
g_free(entry->valid_mapping);
- g_free(entry);
+ if (pentry) {
+ pentry->next = entry->next;
+ g_free(entry);
+ } else {
+ memset(entry, 0, sizeof *entry);
+ }
}
typedef struct XenMapCacheData {
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH v6 3/8] xen: Add xen_mr_is_memory()
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
2024-05-16 15:47 ` [PATCH v6 1/8] xen: mapcache: Make MCACHE_BUCKET_SHIFT runtime configurable Edgar E. Iglesias
2024-05-16 15:47 ` [PATCH v6 2/8] xen: mapcache: Unmap first entries in buckets Edgar E. Iglesias
@ 2024-05-16 15:47 ` Edgar E. Iglesias
2024-05-16 15:48 ` [PATCH v6 4/8] softmmu: xen: Always pass offset + addr to xen_map_cache Edgar E. Iglesias
` (4 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:47 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
David Hildenbrand, Anthony PERARD, Paul Durrant, xen-devel
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Add xen_mr_is_memory() to abstract away tests for the
xen_memory MR.
No functional changes.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
---
hw/xen/xen-hvm-common.c | 10 ++++++++--
include/sysemu/xen.h | 8 ++++++++
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index 2d1b032121..a0a0252da0 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -12,6 +12,12 @@
MemoryRegion xen_memory;
+/* Check for xen memory. */
+bool xen_mr_is_memory(MemoryRegion *mr)
+{
+ return mr == &xen_memory;
+}
+
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
Error **errp)
{
@@ -28,7 +34,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
return;
}
- if (mr == &xen_memory) {
+ if (xen_mr_is_memory(mr)) {
return;
}
@@ -55,7 +61,7 @@ static void xen_set_memory(struct MemoryListener *listener,
{
XenIOState *state = container_of(listener, XenIOState, memory_listener);
- if (section->mr == &xen_memory) {
+ if (xen_mr_is_memory(section->mr)) {
return;
} else {
if (add) {
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
index 754ec2e6cb..dc72f83bcb 100644
--- a/include/sysemu/xen.h
+++ b/include/sysemu/xen.h
@@ -34,6 +34,8 @@ void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr, Error **errp);
+bool xen_mr_is_memory(MemoryRegion *mr);
+
#else /* !CONFIG_XEN_IS_POSSIBLE */
#define xen_enabled() 0
@@ -47,6 +49,12 @@ static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
g_assert_not_reached();
}
+static inline bool xen_mr_is_memory(MemoryRegion *mr)
+{
+ g_assert_not_reached();
+ return false;
+}
+
#endif /* CONFIG_XEN_IS_POSSIBLE */
#endif
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH v6 4/8] softmmu: xen: Always pass offset + addr to xen_map_cache
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
` (2 preceding siblings ...)
2024-05-16 15:47 ` [PATCH v6 3/8] xen: Add xen_mr_is_memory() Edgar E. Iglesias
@ 2024-05-16 15:48 ` Edgar E. Iglesias
2024-05-16 19:55 ` David Hildenbrand
2024-05-16 15:48 ` [PATCH v6 5/8] softmmu: Replace check for RAMBlock offset 0 with xen_mr_is_memory Edgar E. Iglesias
` (3 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:48 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Paolo Bonzini, Peter Xu, David Hildenbrand,
Philippe Mathieu-Daudé
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Always pass address with offset to xen_map_cache().
This is in preparation for support for grant mappings.
Since this is within a block that checks for offset == 0,
this has no functional changes.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
system/physmem.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/system/physmem.c b/system/physmem.c
index 342b7a8fd4..5e6257ef65 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -2230,7 +2230,8 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
* In that case just map the requested area.
*/
if (block->offset == 0) {
- return xen_map_cache(block->mr, addr, len, lock, lock,
+ return xen_map_cache(block->mr, block->offset + addr,
+ len, lock, lock,
is_write);
}
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* Re: [PATCH v6 4/8] softmmu: xen: Always pass offset + addr to xen_map_cache
2024-05-16 15:48 ` [PATCH v6 4/8] softmmu: xen: Always pass offset + addr to xen_map_cache Edgar E. Iglesias
@ 2024-05-16 19:55 ` David Hildenbrand
0 siblings, 0 replies; 17+ messages in thread
From: David Hildenbrand @ 2024-05-16 19:55 UTC (permalink / raw)
To: Edgar E. Iglesias, qemu-devel
Cc: sstabellini, jgross, Edgar E. Iglesias, Paolo Bonzini, Peter Xu,
Philippe Mathieu-Daudé
On 16.05.24 17:48, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> Always pass address with offset to xen_map_cache().
> This is in preparation for support for grant mappings.
>
> Since this is within a block that checks for offset == 0,
> this has no functional changes.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
Reviewed-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v6 5/8] softmmu: Replace check for RAMBlock offset 0 with xen_mr_is_memory
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
` (3 preceding siblings ...)
2024-05-16 15:48 ` [PATCH v6 4/8] softmmu: xen: Always pass offset + addr to xen_map_cache Edgar E. Iglesias
@ 2024-05-16 15:48 ` Edgar E. Iglesias
2024-05-16 19:56 ` David Hildenbrand
2024-05-16 15:48 ` [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache() Edgar E. Iglesias
` (2 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:48 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Paolo Bonzini, Peter Xu, David Hildenbrand,
Philippe Mathieu-Daudé
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
For xen, when checking for the first RAM (xen_memory), use
xen_mr_is_memory() rather than checking for a RAMBlock with
offset 0.
All Xen machines create xen_memory first so this has no
functional change for existing machines.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
system/physmem.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/system/physmem.c b/system/physmem.c
index 5e6257ef65..b7847db1a2 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -2229,7 +2229,7 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
* because we don't want to map the entire memory in QEMU.
* In that case just map the requested area.
*/
- if (block->offset == 0) {
+ if (xen_mr_is_memory(block->mr)) {
return xen_map_cache(block->mr, block->offset + addr,
len, lock, lock,
is_write);
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* Re: [PATCH v6 5/8] softmmu: Replace check for RAMBlock offset 0 with xen_mr_is_memory
2024-05-16 15:48 ` [PATCH v6 5/8] softmmu: Replace check for RAMBlock offset 0 with xen_mr_is_memory Edgar E. Iglesias
@ 2024-05-16 19:56 ` David Hildenbrand
0 siblings, 0 replies; 17+ messages in thread
From: David Hildenbrand @ 2024-05-16 19:56 UTC (permalink / raw)
To: Edgar E. Iglesias, qemu-devel
Cc: sstabellini, jgross, Edgar E. Iglesias, Paolo Bonzini, Peter Xu,
Philippe Mathieu-Daudé
On 16.05.24 17:48, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> For xen, when checking for the first RAM (xen_memory), use
> xen_mr_is_memory() rather than checking for a RAMBlock with
> offset 0.
>
> All Xen machines create xen_memory first so this has no
> functional change for existing machines.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
Reviewed-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache()
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
` (4 preceding siblings ...)
2024-05-16 15:48 ` [PATCH v6 5/8] softmmu: Replace check for RAMBlock offset 0 with xen_mr_is_memory Edgar E. Iglesias
@ 2024-05-16 15:48 ` Edgar E. Iglesias
2024-05-16 19:57 ` David Hildenbrand
2024-05-17 0:12 ` Stefano Stabellini
2024-05-16 15:48 ` [PATCH v6 7/8] xen: mapcache: Add support for grant mappings Edgar E. Iglesias
2024-05-16 15:48 ` [PATCH v6 8/8] hw/arm: xen: Enable use of " Edgar E. Iglesias
7 siblings, 2 replies; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:48 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, Paolo Bonzini, Peter Xu,
David Hildenbrand, Philippe Mathieu-Daudé, xen-devel
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Pass the ram_addr offset to xen_map_cache.
This is in preparation for adding grant mappings that need
to compute the address within the RAMBlock.
No functional changes.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
---
hw/xen/xen-mapcache.c | 16 +++++++++++-----
include/sysemu/xen-mapcache.h | 2 ++
system/physmem.c | 9 +++++----
3 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index ec95445696..a07c47b0b1 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -167,7 +167,8 @@ static void xen_remap_bucket(MapCache *mc,
void *vaddr,
hwaddr size,
hwaddr address_index,
- bool dummy)
+ bool dummy,
+ ram_addr_t ram_offset)
{
uint8_t *vaddr_base;
xen_pfn_t *pfns;
@@ -266,6 +267,7 @@ static void xen_remap_bucket(MapCache *mc,
static uint8_t *xen_map_cache_unlocked(MapCache *mc,
hwaddr phys_addr, hwaddr size,
+ ram_addr_t ram_offset,
uint8_t lock, bool dma, bool is_write)
{
MapCacheEntry *entry, *pentry = NULL,
@@ -337,14 +339,16 @@ tryagain:
if (!entry) {
entry = g_new0(MapCacheEntry, 1);
pentry->next = entry;
- xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy);
+ xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
+ ram_offset);
} else if (!entry->lock) {
if (!entry->vaddr_base || entry->paddr_index != address_index ||
entry->size != cache_size ||
!test_bits(address_offset >> XC_PAGE_SHIFT,
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
- xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy);
+ xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
+ ram_offset);
}
}
@@ -391,13 +395,15 @@ tryagain:
uint8_t *xen_map_cache(MemoryRegion *mr,
hwaddr phys_addr, hwaddr size,
+ ram_addr_t ram_addr_offset,
uint8_t lock, bool dma,
bool is_write)
{
uint8_t *p;
mapcache_lock(mapcache);
- p = xen_map_cache_unlocked(mapcache, phys_addr, size, lock, dma, is_write);
+ p = xen_map_cache_unlocked(mapcache, phys_addr, size, ram_addr_offset,
+ lock, dma, is_write);
mapcache_unlock(mapcache);
return p;
}
@@ -632,7 +638,7 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
xen_remap_bucket(mc, entry, entry->vaddr_base,
- cache_size, address_index, false);
+ cache_size, address_index, false, old_phys_addr);
if (!test_bits(address_offset >> XC_PAGE_SHIFT,
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
index 1ec9e66752..b5e3ea1bc0 100644
--- a/include/sysemu/xen-mapcache.h
+++ b/include/sysemu/xen-mapcache.h
@@ -19,6 +19,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
void xen_map_cache_init(phys_offset_to_gaddr_t f,
void *opaque);
uint8_t *xen_map_cache(MemoryRegion *mr, hwaddr phys_addr, hwaddr size,
+ ram_addr_t ram_addr_offset,
uint8_t lock, bool dma,
bool is_write);
ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
@@ -37,6 +38,7 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
static inline uint8_t *xen_map_cache(MemoryRegion *mr,
hwaddr phys_addr,
hwaddr size,
+ ram_addr_t ram_addr_offset,
uint8_t lock,
bool dma,
bool is_write)
diff --git a/system/physmem.c b/system/physmem.c
index b7847db1a2..33d09f7571 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -2231,13 +2231,14 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
*/
if (xen_mr_is_memory(block->mr)) {
return xen_map_cache(block->mr, block->offset + addr,
- len, lock, lock,
- is_write);
+ len, block->offset,
+ lock, lock, is_write);
}
block->host = xen_map_cache(block->mr, block->offset,
- block->max_length, 1,
- lock, is_write);
+ block->max_length,
+ block->offset,
+ 1, lock, is_write);
}
return ramblock_ptr(block, addr);
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* Re: [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache()
2024-05-16 15:48 ` [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache() Edgar E. Iglesias
@ 2024-05-16 19:57 ` David Hildenbrand
2024-05-17 0:12 ` Stefano Stabellini
1 sibling, 0 replies; 17+ messages in thread
From: David Hildenbrand @ 2024-05-16 19:57 UTC (permalink / raw)
To: Edgar E. Iglesias, qemu-devel
Cc: sstabellini, jgross, Edgar E. Iglesias, Anthony PERARD,
Paul Durrant, Paolo Bonzini, Peter Xu,
Philippe Mathieu-Daudé, xen-devel
On 16.05.24 17:48, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> Pass the ram_addr offset to xen_map_cache.
> This is in preparation for adding grant mappings that need
> to compute the address within the RAMBlock.
>
> No functional changes.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> ---
Reviewed-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache()
2024-05-16 15:48 ` [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache() Edgar E. Iglesias
2024-05-16 19:57 ` David Hildenbrand
@ 2024-05-17 0:12 ` Stefano Stabellini
1 sibling, 0 replies; 17+ messages in thread
From: Stefano Stabellini @ 2024-05-17 0:12 UTC (permalink / raw)
To: Edgar E. Iglesias
Cc: qemu-devel, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, Paolo Bonzini, Peter Xu,
David Hildenbrand, Philippe Mathieu-Daudé, xen-devel
On Thu, 16 May 2024, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> Pass the ram_addr offset to xen_map_cache.
> This is in preparation for adding grant mappings that need
> to compute the address within the RAMBlock.
>
> No functional changes.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v6 7/8] xen: mapcache: Add support for grant mappings
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
` (5 preceding siblings ...)
2024-05-16 15:48 ` [PATCH v6 6/8] xen: mapcache: Pass the ram_addr offset to xen_map_cache() Edgar E. Iglesias
@ 2024-05-16 15:48 ` Edgar E. Iglesias
2024-05-23 7:35 ` Manos Pitsidianakis
2024-05-16 15:48 ` [PATCH v6 8/8] hw/arm: xen: Enable use of " Edgar E. Iglesias
7 siblings, 1 reply; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:48 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, xen-devel
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Add a second mapcache for grant mappings. The mapcache for
grants needs to work with XC_PAGE_SIZE granularity since
we can't map larger ranges than what has been granted to us.
Like with foreign mappings (xen_memory), machines using grants
are expected to initialize the xen_grants MR and map it
into their address-map accordingly.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
hw/xen/xen-hvm-common.c | 12 ++-
hw/xen/xen-mapcache.c | 163 ++++++++++++++++++++++++++------
include/hw/xen/xen-hvm-common.h | 3 +
include/sysemu/xen.h | 7 ++
4 files changed, 152 insertions(+), 33 deletions(-)
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index a0a0252da0..b8ace1c368 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -10,12 +10,18 @@
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"
-MemoryRegion xen_memory;
+MemoryRegion xen_memory, xen_grants;
-/* Check for xen memory. */
+/* Check for any kind of xen memory, foreign mappings or grants. */
bool xen_mr_is_memory(MemoryRegion *mr)
{
- return mr == &xen_memory;
+ return mr == &xen_memory || mr == &xen_grants;
+}
+
+/* Check specifically for grants. */
+bool xen_mr_is_grants(MemoryRegion *mr)
+{
+ return mr == &xen_grants;
}
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
index a07c47b0b1..1cbc2aeaa9 100644
--- a/hw/xen/xen-mapcache.c
+++ b/hw/xen/xen-mapcache.c
@@ -14,6 +14,7 @@
#include <sys/resource.h>
+#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen_native.h"
#include "qemu/bitmap.h"
@@ -21,6 +22,8 @@
#include "sysemu/xen-mapcache.h"
#include "trace.h"
+#include <xenevtchn.h>
+#include <xengnttab.h>
#if HOST_LONG_BITS == 32
# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
@@ -41,6 +44,7 @@ typedef struct MapCacheEntry {
unsigned long *valid_mapping;
uint32_t lock;
#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
+#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
uint8_t flags;
hwaddr size;
struct MapCacheEntry *next;
@@ -71,6 +75,8 @@ typedef struct MapCache {
} MapCache;
static MapCache *mapcache;
+static MapCache *mapcache_grants;
+static xengnttab_handle *xen_region_gnttabdev;
static inline void mapcache_lock(MapCache *mc)
{
@@ -131,6 +137,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
unsigned long max_mcache_size;
unsigned int bucket_shift;
+ xen_region_gnttabdev = xengnttab_open(NULL, 0);
+ if (xen_region_gnttabdev == NULL) {
+ error_report("mapcache: Failed to open gnttab device");
+ exit(EXIT_FAILURE);
+ }
+
if (HOST_LONG_BITS == 32) {
bucket_shift = 16;
} else {
@@ -159,6 +171,15 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
mapcache = xen_map_cache_init_single(f, opaque,
bucket_shift,
max_mcache_size);
+
+ /*
+ * Grant mappings must use XC_PAGE_SIZE granularity since we can't
+ * map anything beyond the number of pages granted to us.
+ */
+ mapcache_grants = xen_map_cache_init_single(f, opaque,
+ XC_PAGE_SHIFT,
+ max_mcache_size);
+
setrlimit(RLIMIT_AS, &rlimit_as);
}
@@ -168,17 +189,24 @@ static void xen_remap_bucket(MapCache *mc,
hwaddr size,
hwaddr address_index,
bool dummy,
+ bool grant,
+ bool is_write,
ram_addr_t ram_offset)
{
uint8_t *vaddr_base;
- xen_pfn_t *pfns;
+ uint32_t *refs = NULL;
+ xen_pfn_t *pfns = NULL;
int *err;
unsigned int i;
hwaddr nb_pfn = size >> XC_PAGE_SHIFT;
trace_xen_remap_bucket(address_index);
- pfns = g_new0(xen_pfn_t, nb_pfn);
+ if (grant) {
+ refs = g_new0(uint32_t, nb_pfn);
+ } else {
+ pfns = g_new0(xen_pfn_t, nb_pfn);
+ }
err = g_new0(int, nb_pfn);
if (entry->vaddr_base != NULL) {
@@ -207,21 +235,51 @@ static void xen_remap_bucket(MapCache *mc,
g_free(entry->valid_mapping);
entry->valid_mapping = NULL;
- for (i = 0; i < nb_pfn; i++) {
- pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
+ if (grant) {
+ hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);
+
+ for (i = 0; i < nb_pfn; i++) {
+ refs[i] = grant_base + i;
+ }
+ } else {
+ for (i = 0; i < nb_pfn; i++) {
+ pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
+ }
}
- /*
- * If the caller has requested the mapping at a specific address use
- * MAP_FIXED to make sure it's honored.
- */
+ entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;
+
if (!dummy) {
- vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
- PROT_READ | PROT_WRITE,
- vaddr ? MAP_FIXED : 0,
- nb_pfn, pfns, err);
+ if (grant) {
+ int prot = PROT_READ;
+
+ if (is_write) {
+ prot |= PROT_WRITE;
+ }
+
+ entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
+ assert(vaddr == NULL);
+ vaddr_base = xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
+ nb_pfn,
+ xen_domid, refs,
+ prot);
+ } else {
+ /*
+ * If the caller has requested the mapping at a specific address use
+ * MAP_FIXED to make sure it's honored.
+ *
+ * We don't yet support upgrading mappings from RO to RW, to handle
+ * models using ordinary address_space_rw(), foreign mappings ignore
+ * is_write and are always mapped RW.
+ */
+ vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
+ PROT_READ | PROT_WRITE,
+ vaddr ? MAP_FIXED : 0,
+ nb_pfn, pfns, err);
+ }
if (vaddr_base == NULL) {
- perror("xenforeignmemory_map2");
+ perror(grant ? "xengnttab_map_domain_grant_refs"
+ : "xenforeignmemory_map2");
exit(-1);
}
} else {
@@ -261,6 +319,7 @@ static void xen_remap_bucket(MapCache *mc,
}
}
+ g_free(refs);
g_free(pfns);
g_free(err);
}
@@ -268,7 +327,8 @@ static void xen_remap_bucket(MapCache *mc,
static uint8_t *xen_map_cache_unlocked(MapCache *mc,
hwaddr phys_addr, hwaddr size,
ram_addr_t ram_offset,
- uint8_t lock, bool dma, bool is_write)
+ uint8_t lock, bool dma,
+ bool grant, bool is_write)
{
MapCacheEntry *entry, *pentry = NULL,
*free_entry = NULL, *free_pentry = NULL;
@@ -340,7 +400,7 @@ tryagain:
entry = g_new0(MapCacheEntry, 1);
pentry->next = entry;
xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
- ram_offset);
+ grant, is_write, ram_offset);
} else if (!entry->lock) {
if (!entry->vaddr_base || entry->paddr_index != address_index ||
entry->size != cache_size ||
@@ -348,7 +408,7 @@ tryagain:
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
- ram_offset);
+ grant, is_write, ram_offset);
}
}
@@ -399,12 +459,28 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
uint8_t lock, bool dma,
bool is_write)
{
+ bool grant = xen_mr_is_grants(mr);
+ MapCache *mc = grant ? mapcache_grants : mapcache;
uint8_t *p;
- mapcache_lock(mapcache);
- p = xen_map_cache_unlocked(mapcache, phys_addr, size, ram_addr_offset,
- lock, dma, is_write);
- mapcache_unlock(mapcache);
+ if (grant) {
+ /*
+ * Grants are only supported via address_space_map(). Anything
+ * else is considered a user/guest error.
+ *
+ * QEMU generally doesn't expect these mappings to ever fail, so
+ * if this happens we report an error message and abort().
+ */
+ if (!lock) {
+ error_report("Trying access a grant reference without mapping it.");
+ abort();
+ }
+ }
+
+ mapcache_lock(mc);
+ p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset,
+ lock, dma, grant, is_write);
+ mapcache_unlock(mc);
return p;
}
@@ -449,7 +525,14 @@ static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
- return xen_ram_addr_from_mapcache_single(mapcache, ptr);
+ ram_addr_t addr;
+
+ addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
+ if (addr == RAM_ADDR_INVALID) {
+ addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
+ }
+
+ return addr;
}
static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
@@ -460,6 +543,7 @@ static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
hwaddr paddr_index;
hwaddr size;
int found = 0;
+ int rc;
QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
if (reventry->vaddr_req == buffer) {
@@ -502,7 +586,14 @@ static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
}
ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
- if (munmap(entry->vaddr_base, entry->size) != 0) {
+ if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) {
+ rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base,
+ entry->size >> mc->bucket_shift);
+ } else {
+ rc = munmap(entry->vaddr_base, entry->size);
+ }
+
+ if (rc) {
perror("unmap fails");
exit(-1);
}
@@ -521,14 +612,24 @@ typedef struct XenMapCacheData {
uint8_t *buffer;
} XenMapCacheData;
+static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
+{
+ mapcache_lock(mc);
+ xen_invalidate_map_cache_entry_unlocked(mc, buffer);
+ mapcache_unlock(mc);
+}
+
+static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
+{
+ xen_invalidate_map_cache_entry_single(mapcache, buffer);
+ xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
+}
+
static void xen_invalidate_map_cache_entry_bh(void *opaque)
{
XenMapCacheData *data = opaque;
- mapcache_lock(mapcache);
- xen_invalidate_map_cache_entry_unlocked(mapcache, data->buffer);
- mapcache_unlock(mapcache);
-
+ xen_invalidate_map_cache_entry_all(data->buffer);
aio_co_wake(data->co);
}
@@ -543,9 +644,7 @@ void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer)
xen_invalidate_map_cache_entry_bh, &data);
qemu_coroutine_yield();
} else {
- mapcache_lock(mapcache);
- xen_invalidate_map_cache_entry_unlocked(mapcache, buffer);
- mapcache_unlock(mapcache);
+ xen_invalidate_map_cache_entry_all(buffer);
}
}
@@ -597,6 +696,7 @@ void xen_invalidate_map_cache(void)
bdrv_drain_all();
xen_invalidate_map_cache_single(mapcache);
+ xen_invalidate_map_cache_single(mapcache_grants);
}
static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
@@ -632,13 +732,16 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
return NULL;
}
+ assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);
+
address_index = new_phys_addr >> mc->bucket_shift;
address_offset = new_phys_addr & (mc->bucket_size - 1);
trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
xen_remap_bucket(mc, entry, entry->vaddr_base,
- cache_size, address_index, false, old_phys_addr);
+ cache_size, address_index, false,
+ false, false, old_phys_addr);
if (!test_bits(address_offset >> XC_PAGE_SHIFT,
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
index 65a51aac2e..3d796235dc 100644
--- a/include/hw/xen/xen-hvm-common.h
+++ b/include/hw/xen/xen-hvm-common.h
@@ -16,6 +16,7 @@
#include <xen/hvm/ioreq.h>
extern MemoryRegion xen_memory;
+extern MemoryRegion xen_grants;
extern MemoryListener xen_io_listener;
extern DeviceListener xen_device_listener;
@@ -29,6 +30,8 @@ extern DeviceListener xen_device_listener;
do { } while (0)
#endif
+#define XEN_GRANT_ADDR_OFF (1ULL << 63)
+
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
return shared_page->vcpu_ioreq[i].vp_eport;
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
index dc72f83bcb..19dccf4d71 100644
--- a/include/sysemu/xen.h
+++ b/include/sysemu/xen.h
@@ -35,6 +35,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr, Error **errp);
bool xen_mr_is_memory(MemoryRegion *mr);
+bool xen_mr_is_grants(MemoryRegion *mr);
#else /* !CONFIG_XEN_IS_POSSIBLE */
@@ -55,6 +56,12 @@ static inline bool xen_mr_is_memory(MemoryRegion *mr)
return false;
}
+static inline bool xen_mr_is_grants(MemoryRegion *mr)
+{
+ g_assert_not_reached();
+ return false;
+}
+
#endif /* CONFIG_XEN_IS_POSSIBLE */
#endif
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* Re: [PATCH v6 7/8] xen: mapcache: Add support for grant mappings
2024-05-16 15:48 ` [PATCH v6 7/8] xen: mapcache: Add support for grant mappings Edgar E. Iglesias
@ 2024-05-23 7:35 ` Manos Pitsidianakis
2024-05-23 10:23 ` Edgar E. Iglesias
0 siblings, 1 reply; 17+ messages in thread
From: Manos Pitsidianakis @ 2024-05-23 7:35 UTC (permalink / raw)
To: xen-devel, Edgar E. Iglesias, qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant, xen-devel
On Thu, 16 May 2024 18:48, "Edgar E. Iglesias" <edgar.iglesias@gmail.com> wrote:
>From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
>Add a second mapcache for grant mappings. The mapcache for
>grants needs to work with XC_PAGE_SIZE granularity since
>we can't map larger ranges than what has been granted to us.
>
>Like with foreign mappings (xen_memory), machines using grants
>are expected to initialize the xen_grants MR and map it
>into their address-map accordingly.
>
>Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
>Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
>---
> hw/xen/xen-hvm-common.c | 12 ++-
> hw/xen/xen-mapcache.c | 163 ++++++++++++++++++++++++++------
> include/hw/xen/xen-hvm-common.h | 3 +
> include/sysemu/xen.h | 7 ++
> 4 files changed, 152 insertions(+), 33 deletions(-)
>
>diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
>index a0a0252da0..b8ace1c368 100644
>--- a/hw/xen/xen-hvm-common.c
>+++ b/hw/xen/xen-hvm-common.c
>@@ -10,12 +10,18 @@
> #include "hw/boards.h"
> #include "hw/xen/arch_hvm.h"
>
>-MemoryRegion xen_memory;
>+MemoryRegion xen_memory, xen_grants;
>
>-/* Check for xen memory. */
>+/* Check for any kind of xen memory, foreign mappings or grants. */
> bool xen_mr_is_memory(MemoryRegion *mr)
> {
>- return mr == &xen_memory;
>+ return mr == &xen_memory || mr == &xen_grants;
>+}
>+
>+/* Check specifically for grants. */
>+bool xen_mr_is_grants(MemoryRegion *mr)
>+{
>+ return mr == &xen_grants;
> }
>
> void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
>diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
>index a07c47b0b1..1cbc2aeaa9 100644
>--- a/hw/xen/xen-mapcache.c
>+++ b/hw/xen/xen-mapcache.c
>@@ -14,6 +14,7 @@
>
> #include <sys/resource.h>
>
>+#include "hw/xen/xen-hvm-common.h"
> #include "hw/xen/xen_native.h"
> #include "qemu/bitmap.h"
>
>@@ -21,6 +22,8 @@
> #include "sysemu/xen-mapcache.h"
> #include "trace.h"
>
>+#include <xenevtchn.h>
>+#include <xengnttab.h>
>
> #if HOST_LONG_BITS == 32
> # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
>@@ -41,6 +44,7 @@ typedef struct MapCacheEntry {
> unsigned long *valid_mapping;
> uint32_t lock;
> #define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
>+#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
Might we get more entry kinds in the future? (for example foreign maps).
Maybe this could be an enum.
> uint8_t flags;
> hwaddr size;
> struct MapCacheEntry *next;
>@@ -71,6 +75,8 @@ typedef struct MapCache {
> } MapCache;
>
> static MapCache *mapcache;
>+static MapCache *mapcache_grants;
>+static xengnttab_handle *xen_region_gnttabdev;
>
> static inline void mapcache_lock(MapCache *mc)
> {
>@@ -131,6 +137,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> unsigned long max_mcache_size;
> unsigned int bucket_shift;
>
>+ xen_region_gnttabdev = xengnttab_open(NULL, 0);
>+ if (xen_region_gnttabdev == NULL) {
>+ error_report("mapcache: Failed to open gnttab device");
>+ exit(EXIT_FAILURE);
>+ }
>+
> if (HOST_LONG_BITS == 32) {
> bucket_shift = 16;
> } else {
>@@ -159,6 +171,15 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> mapcache = xen_map_cache_init_single(f, opaque,
> bucket_shift,
> max_mcache_size);
>+
>+ /*
>+ * Grant mappings must use XC_PAGE_SIZE granularity since we can't
>+ * map anything beyond the number of pages granted to us.
>+ */
>+ mapcache_grants = xen_map_cache_init_single(f, opaque,
>+ XC_PAGE_SHIFT,
>+ max_mcache_size);
>+
> setrlimit(RLIMIT_AS, &rlimit_as);
> }
>
>@@ -168,17 +189,24 @@ static void xen_remap_bucket(MapCache *mc,
> hwaddr size,
> hwaddr address_index,
> bool dummy,
>+ bool grant,
>+ bool is_write,
> ram_addr_t ram_offset)
> {
> uint8_t *vaddr_base;
>- xen_pfn_t *pfns;
>+ uint32_t *refs = NULL;
>+ xen_pfn_t *pfns = NULL;
> int *err;
You should use g_autofree to perform automatic cleanup on function exit
instead of manually freeing, since the allocations should only live
within the function call.
> unsigned int i;
> hwaddr nb_pfn = size >> XC_PAGE_SHIFT;
>
> trace_xen_remap_bucket(address_index);
>
>- pfns = g_new0(xen_pfn_t, nb_pfn);
>+ if (grant) {
>+ refs = g_new0(uint32_t, nb_pfn);
>+ } else {
>+ pfns = g_new0(xen_pfn_t, nb_pfn);
>+ }
> err = g_new0(int, nb_pfn);
>
> if (entry->vaddr_base != NULL) {
>@@ -207,21 +235,51 @@ static void xen_remap_bucket(MapCache *mc,
> g_free(entry->valid_mapping);
> entry->valid_mapping = NULL;
>
>- for (i = 0; i < nb_pfn; i++) {
>- pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
>+ if (grant) {
>+ hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);
>+
>+ for (i = 0; i < nb_pfn; i++) {
>+ refs[i] = grant_base + i;
>+ }
>+ } else {
>+ for (i = 0; i < nb_pfn; i++) {
>+ pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
>+ }
> }
>
>- /*
>- * If the caller has requested the mapping at a specific address use
>- * MAP_FIXED to make sure it's honored.
>- */
>+ entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;
>+
> if (!dummy) {
>- vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
>- PROT_READ | PROT_WRITE,
>- vaddr ? MAP_FIXED : 0,
>- nb_pfn, pfns, err);
Since err is not NULL here, the function might return a valid pointer
but individual frames might have failed.
>+ if (grant) {
>+ int prot = PROT_READ;
>+
>+ if (is_write) {
>+ prot |= PROT_WRITE;
>+ }
>+
>+ entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
>+ assert(vaddr == NULL);
>+ vaddr_base = xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
>+ nb_pfn,
>+ xen_domid, refs,
>+ prot);
>+ } else {
>+ /*
>+ * If the caller has requested the mapping at a specific address use
>+ * MAP_FIXED to make sure it's honored.
>+ *
>+ * We don't yet support upgrading mappings from RO to RW, to handle
>+ * models using ordinary address_space_rw(), foreign mappings ignore
>+ * is_write and are always mapped RW.
>+ */
>+ vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
>+ PROT_READ | PROT_WRITE,
>+ vaddr ? MAP_FIXED : 0,
>+ nb_pfn, pfns, err);
>+ }
> if (vaddr_base == NULL) {
>- perror("xenforeignmemory_map2");
>+ perror(grant ? "xengnttab_map_domain_grant_refs"
>+ : "xenforeignmemory_map2");
> exit(-1);
> }
> } else {
>@@ -261,6 +319,7 @@ static void xen_remap_bucket(MapCache *mc,
> }
> }
>
>+ g_free(refs);
> g_free(pfns);
> g_free(err);
> }
>@@ -268,7 +327,8 @@ static void xen_remap_bucket(MapCache *mc,
> static uint8_t *xen_map_cache_unlocked(MapCache *mc,
> hwaddr phys_addr, hwaddr size,
> ram_addr_t ram_offset,
>- uint8_t lock, bool dma, bool is_write)
>+ uint8_t lock, bool dma,
>+ bool grant, bool is_write)
> {
> MapCacheEntry *entry, *pentry = NULL,
> *free_entry = NULL, *free_pentry = NULL;
>@@ -340,7 +400,7 @@ tryagain:
> entry = g_new0(MapCacheEntry, 1);
> pentry->next = entry;
> xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
>- ram_offset);
>+ grant, is_write, ram_offset);
> } else if (!entry->lock) {
> if (!entry->vaddr_base || entry->paddr_index != address_index ||
> entry->size != cache_size ||
>@@ -348,7 +408,7 @@ tryagain:
> test_bit_size >> XC_PAGE_SHIFT,
> entry->valid_mapping)) {
> xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
>- ram_offset);
>+ grant, is_write, ram_offset);
> }
> }
>
>@@ -399,12 +459,28 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
> uint8_t lock, bool dma,
> bool is_write)
> {
>+ bool grant = xen_mr_is_grants(mr);
>+ MapCache *mc = grant ? mapcache_grants : mapcache;
> uint8_t *p;
>
>- mapcache_lock(mapcache);
>- p = xen_map_cache_unlocked(mapcache, phys_addr, size, ram_addr_offset,
>- lock, dma, is_write);
>- mapcache_unlock(mapcache);
>+ if (grant) {
>+ /*
>+ * Grants are only supported via address_space_map(). Anything
>+ * else is considered a user/guest error.
>+ *
>+ * QEMU generally doesn't expect these mappings to ever fail, so
>+ * if this happens we report an error message and abort().
>+ */
>+ if (!lock) {
Nested if conditions that can be flattened, i.e. this could be
if (grant && !lock)
>+ error_report("Trying access a grant reference without mapping it.");
s/Trying access a grant/Tried to access a grant/
>+ abort();
>+ }
>+ }
>+
>+ mapcache_lock(mc);
>+ p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset,
>+ lock, dma, grant, is_write);
>+ mapcache_unlock(mc);
> return p;
> }
>
>@@ -449,7 +525,14 @@ static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
>
> ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> {
>- return xen_ram_addr_from_mapcache_single(mapcache, ptr);
>+ ram_addr_t addr;
>+
>+ addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
>+ if (addr == RAM_ADDR_INVALID) {
>+ addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
>+ }
>+
>+ return addr;
> }
>
> static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
>@@ -460,6 +543,7 @@ static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
> hwaddr paddr_index;
> hwaddr size;
> int found = 0;
>+ int rc;
>
> QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
> if (reventry->vaddr_req == buffer) {
>@@ -502,7 +586,14 @@ static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
> }
>
> ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
>- if (munmap(entry->vaddr_base, entry->size) != 0) {
>+ if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) {
>+ rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base,
>+ entry->size >> mc->bucket_shift);
>+ } else {
>+ rc = munmap(entry->vaddr_base, entry->size);
>+ }
>+
>+ if (rc) {
> perror("unmap fails");
> exit(-1);
> }
>@@ -521,14 +612,24 @@ typedef struct XenMapCacheData {
> uint8_t *buffer;
> } XenMapCacheData;
>
>+static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
>+{
>+ mapcache_lock(mc);
>+ xen_invalidate_map_cache_entry_unlocked(mc, buffer);
>+ mapcache_unlock(mc);
>+}
>+
>+static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
>+{
>+ xen_invalidate_map_cache_entry_single(mapcache, buffer);
>+ xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
>+}
>+
> static void xen_invalidate_map_cache_entry_bh(void *opaque)
> {
> XenMapCacheData *data = opaque;
>
>- mapcache_lock(mapcache);
>- xen_invalidate_map_cache_entry_unlocked(mapcache, data->buffer);
>- mapcache_unlock(mapcache);
>-
>+ xen_invalidate_map_cache_entry_all(data->buffer);
> aio_co_wake(data->co);
> }
>
>@@ -543,9 +644,7 @@ void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer)
> xen_invalidate_map_cache_entry_bh, &data);
> qemu_coroutine_yield();
> } else {
>- mapcache_lock(mapcache);
>- xen_invalidate_map_cache_entry_unlocked(mapcache, buffer);
>- mapcache_unlock(mapcache);
>+ xen_invalidate_map_cache_entry_all(buffer);
> }
> }
>
>@@ -597,6 +696,7 @@ void xen_invalidate_map_cache(void)
> bdrv_drain_all();
>
> xen_invalidate_map_cache_single(mapcache);
>+ xen_invalidate_map_cache_single(mapcache_grants);
> }
>
> static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
>@@ -632,13 +732,16 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
> return NULL;
> }
>
>+ assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);
>+
> address_index = new_phys_addr >> mc->bucket_shift;
> address_offset = new_phys_addr & (mc->bucket_size - 1);
>
> trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
>
> xen_remap_bucket(mc, entry, entry->vaddr_base,
>- cache_size, address_index, false, old_phys_addr);
>+ cache_size, address_index, false,
>+ false, false, old_phys_addr);
> if (!test_bits(address_offset >> XC_PAGE_SHIFT,
> test_bit_size >> XC_PAGE_SHIFT,
> entry->valid_mapping)) {
>diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
>index 65a51aac2e..3d796235dc 100644
>--- a/include/hw/xen/xen-hvm-common.h
>+++ b/include/hw/xen/xen-hvm-common.h
>@@ -16,6 +16,7 @@
> #include <xen/hvm/ioreq.h>
>
> extern MemoryRegion xen_memory;
>+extern MemoryRegion xen_grants;
> extern MemoryListener xen_io_listener;
> extern DeviceListener xen_device_listener;
>
>@@ -29,6 +30,8 @@ extern DeviceListener xen_device_listener;
> do { } while (0)
> #endif
>
>+#define XEN_GRANT_ADDR_OFF (1ULL << 63)
>+
> static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
> {
> return shared_page->vcpu_ioreq[i].vp_eport;
>diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
>index dc72f83bcb..19dccf4d71 100644
>--- a/include/sysemu/xen.h
>+++ b/include/sysemu/xen.h
>@@ -35,6 +35,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
> struct MemoryRegion *mr, Error **errp);
>
> bool xen_mr_is_memory(MemoryRegion *mr);
>+bool xen_mr_is_grants(MemoryRegion *mr);
>
> #else /* !CONFIG_XEN_IS_POSSIBLE */
>
>@@ -55,6 +56,12 @@ static inline bool xen_mr_is_memory(MemoryRegion *mr)
> return false;
> }
>
>+static inline bool xen_mr_is_grants(MemoryRegion *mr)
>+{
>+ g_assert_not_reached();
>+ return false;
>+}
>+
> #endif /* CONFIG_XEN_IS_POSSIBLE */
>
> #endif
>--
>2.40.1
>
>
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v6 7/8] xen: mapcache: Add support for grant mappings
2024-05-23 7:35 ` Manos Pitsidianakis
@ 2024-05-23 10:23 ` Edgar E. Iglesias
2024-05-23 18:30 ` Stefano Stabellini
0 siblings, 1 reply; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-23 10:23 UTC (permalink / raw)
To: Manos Pitsidianakis
Cc: xen-devel, qemu-devel, sstabellini, jgross, Edgar E. Iglesias,
Anthony PERARD, Paul Durrant
[-- Attachment #1: Type: text/plain, Size: 17666 bytes --]
On Thu, May 23, 2024 at 9:47 AM Manos Pitsidianakis <
manos.pitsidianakis@linaro.org> wrote:
> On Thu, 16 May 2024 18:48, "Edgar E. Iglesias" <edgar.iglesias@gmail.com>
> wrote:
> >From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> >
> >Add a second mapcache for grant mappings. The mapcache for
> >grants needs to work with XC_PAGE_SIZE granularity since
> >we can't map larger ranges than what has been granted to us.
> >
> >Like with foreign mappings (xen_memory), machines using grants
> >are expected to initialize the xen_grants MR and map it
> >into their address-map accordingly.
> >
> >Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> >Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> >---
> > hw/xen/xen-hvm-common.c | 12 ++-
> > hw/xen/xen-mapcache.c | 163 ++++++++++++++++++++++++++------
> > include/hw/xen/xen-hvm-common.h | 3 +
> > include/sysemu/xen.h | 7 ++
> > 4 files changed, 152 insertions(+), 33 deletions(-)
> >
> >diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
> >index a0a0252da0..b8ace1c368 100644
> >--- a/hw/xen/xen-hvm-common.c
> >+++ b/hw/xen/xen-hvm-common.c
> >@@ -10,12 +10,18 @@
> > #include "hw/boards.h"
> > #include "hw/xen/arch_hvm.h"
> >
> >-MemoryRegion xen_memory;
> >+MemoryRegion xen_memory, xen_grants;
> >
> >-/* Check for xen memory. */
> >+/* Check for any kind of xen memory, foreign mappings or grants. */
> > bool xen_mr_is_memory(MemoryRegion *mr)
> > {
> >- return mr == &xen_memory;
> >+ return mr == &xen_memory || mr == &xen_grants;
> >+}
> >+
> >+/* Check specifically for grants. */
> >+bool xen_mr_is_grants(MemoryRegion *mr)
> >+{
> >+ return mr == &xen_grants;
> > }
> >
> > void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion
> *mr,
> >diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
> >index a07c47b0b1..1cbc2aeaa9 100644
> >--- a/hw/xen/xen-mapcache.c
> >+++ b/hw/xen/xen-mapcache.c
> >@@ -14,6 +14,7 @@
> >
> > #include <sys/resource.h>
> >
> >+#include "hw/xen/xen-hvm-common.h"
> > #include "hw/xen/xen_native.h"
> > #include "qemu/bitmap.h"
> >
> >@@ -21,6 +22,8 @@
> > #include "sysemu/xen-mapcache.h"
> > #include "trace.h"
> >
> >+#include <xenevtchn.h>
> >+#include <xengnttab.h>
> >
> > #if HOST_LONG_BITS == 32
> > # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
> >@@ -41,6 +44,7 @@ typedef struct MapCacheEntry {
> > unsigned long *valid_mapping;
> > uint32_t lock;
> > #define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
> >+#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
>
> Might we get more entry kinds in the future? (for example foreign maps).
> Maybe this could be an enum.
>
>
Perhaps. Foreign mappings are already supported, this flag separates
ordinary foreign mappings from grant foreign mappings.
IMO, since this is not an external interface it's probably better to change
it once we have a concrete use-case at hand.
> > uint8_t flags;
> > hwaddr size;
> > struct MapCacheEntry *next;
> >@@ -71,6 +75,8 @@ typedef struct MapCache {
> > } MapCache;
> >
> > static MapCache *mapcache;
> >+static MapCache *mapcache_grants;
> >+static xengnttab_handle *xen_region_gnttabdev;
> >
> > static inline void mapcache_lock(MapCache *mc)
> > {
> >@@ -131,6 +137,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f,
> void *opaque)
> > unsigned long max_mcache_size;
> > unsigned int bucket_shift;
> >
> >+ xen_region_gnttabdev = xengnttab_open(NULL, 0);
> >+ if (xen_region_gnttabdev == NULL) {
> >+ error_report("mapcache: Failed to open gnttab device");
> >+ exit(EXIT_FAILURE);
> >+ }
> >+
> > if (HOST_LONG_BITS == 32) {
> > bucket_shift = 16;
> > } else {
> >@@ -159,6 +171,15 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f,
> void *opaque)
> > mapcache = xen_map_cache_init_single(f, opaque,
> > bucket_shift,
> > max_mcache_size);
> >+
> >+ /*
> >+ * Grant mappings must use XC_PAGE_SIZE granularity since we can't
> >+ * map anything beyond the number of pages granted to us.
> >+ */
> >+ mapcache_grants = xen_map_cache_init_single(f, opaque,
> >+ XC_PAGE_SHIFT,
> >+ max_mcache_size);
> >+
> > setrlimit(RLIMIT_AS, &rlimit_as);
> > }
> >
> >@@ -168,17 +189,24 @@ static void xen_remap_bucket(MapCache *mc,
> > hwaddr size,
> > hwaddr address_index,
> > bool dummy,
> >+ bool grant,
> >+ bool is_write,
> > ram_addr_t ram_offset)
> > {
> > uint8_t *vaddr_base;
> >- xen_pfn_t *pfns;
> >+ uint32_t *refs = NULL;
> >+ xen_pfn_t *pfns = NULL;
> > int *err;
>
> You should use g_autofree to perform automatic cleanup on function exit
> instead of manually freeing, since the allocations should only live
> within the function call.
>
>
Sounds good, I'll do that in the next version.
> > unsigned int i;
> > hwaddr nb_pfn = size >> XC_PAGE_SHIFT;
> >
> > trace_xen_remap_bucket(address_index);
> >
> >- pfns = g_new0(xen_pfn_t, nb_pfn);
> >+ if (grant) {
> >+ refs = g_new0(uint32_t, nb_pfn);
> >+ } else {
> >+ pfns = g_new0(xen_pfn_t, nb_pfn);
> >+ }
> > err = g_new0(int, nb_pfn);
> >
> > if (entry->vaddr_base != NULL) {
> >@@ -207,21 +235,51 @@ static void xen_remap_bucket(MapCache *mc,
> > g_free(entry->valid_mapping);
> > entry->valid_mapping = NULL;
> >
> >- for (i = 0; i < nb_pfn; i++) {
> >- pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT))
> + i;
> >+ if (grant) {
> >+ hwaddr grant_base = address_index - (ram_offset >>
> XC_PAGE_SHIFT);
> >+
> >+ for (i = 0; i < nb_pfn; i++) {
> >+ refs[i] = grant_base + i;
> >+ }
> >+ } else {
> >+ for (i = 0; i < nb_pfn; i++) {
> >+ pfns[i] = (address_index << (mc->bucket_shift -
> XC_PAGE_SHIFT)) + i;
> >+ }
> > }
> >
> >- /*
> >- * If the caller has requested the mapping at a specific address use
> >- * MAP_FIXED to make sure it's honored.
> >- */
> >+ entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;
> >+
> > if (!dummy) {
> >- vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
> >- PROT_READ | PROT_WRITE,
> >- vaddr ? MAP_FIXED : 0,
> >- nb_pfn, pfns, err);
>
> Since err is not NULL here, the function might return a valid pointer
> but individual frames might have failed.
>
>
Yes but AFAICT, the case when some pages fail foreign mapping is handled
further down the function (see the valid_mappings bitmap).
Note that this series isn't really changing this existing behaviour for
foreign mappings. In any case, If we spot a bug in existing code, I'm happy
to fix it.
>
> >+ if (grant) {
> >+ int prot = PROT_READ;
> >+
> >+ if (is_write) {
> >+ prot |= PROT_WRITE;
> >+ }
> >+
> >+ entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
> >+ assert(vaddr == NULL);
> >+ vaddr_base =
> xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
> >+ nb_pfn,
> >+ xen_domid, refs,
> >+ prot);
> >+ } else {
> >+ /*
> >+ * If the caller has requested the mapping at a specific
> address use
> >+ * MAP_FIXED to make sure it's honored.
> >+ *
> >+ * We don't yet support upgrading mappings from RO to RW, to
> handle
> >+ * models using ordinary address_space_rw(), foreign
> mappings ignore
> >+ * is_write and are always mapped RW.
> >+ */
> >+ vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid,
> vaddr,
> >+ PROT_READ | PROT_WRITE,
> >+ vaddr ? MAP_FIXED : 0,
> >+ nb_pfn, pfns, err);
> >+ }
> > if (vaddr_base == NULL) {
> >- perror("xenforeignmemory_map2");
> >+ perror(grant ? "xengnttab_map_domain_grant_refs"
> >+ : "xenforeignmemory_map2");
> > exit(-1);
> > }
> > } else {
> >@@ -261,6 +319,7 @@ static void xen_remap_bucket(MapCache *mc,
> > }
> > }
> >
> >+ g_free(refs);
> > g_free(pfns);
> > g_free(err);
> > }
> >@@ -268,7 +327,8 @@ static void xen_remap_bucket(MapCache *mc,
> > static uint8_t *xen_map_cache_unlocked(MapCache *mc,
> > hwaddr phys_addr, hwaddr size,
> > ram_addr_t ram_offset,
> >- uint8_t lock, bool dma, bool
> is_write)
> >+ uint8_t lock, bool dma,
> >+ bool grant, bool is_write)
> > {
> > MapCacheEntry *entry, *pentry = NULL,
> > *free_entry = NULL, *free_pentry = NULL;
> >@@ -340,7 +400,7 @@ tryagain:
> > entry = g_new0(MapCacheEntry, 1);
> > pentry->next = entry;
> > xen_remap_bucket(mc, entry, NULL, cache_size, address_index,
> dummy,
> >- ram_offset);
> >+ grant, is_write, ram_offset);
> > } else if (!entry->lock) {
> > if (!entry->vaddr_base || entry->paddr_index != address_index ||
> > entry->size != cache_size ||
> >@@ -348,7 +408,7 @@ tryagain:
> > test_bit_size >> XC_PAGE_SHIFT,
> > entry->valid_mapping)) {
> > xen_remap_bucket(mc, entry, NULL, cache_size, address_index,
> dummy,
> >- ram_offset);
> >+ grant, is_write, ram_offset);
> > }
> > }
> >
> >@@ -399,12 +459,28 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
> > uint8_t lock, bool dma,
> > bool is_write)
> > {
> >+ bool grant = xen_mr_is_grants(mr);
> >+ MapCache *mc = grant ? mapcache_grants : mapcache;
> > uint8_t *p;
> >
> >- mapcache_lock(mapcache);
> >- p = xen_map_cache_unlocked(mapcache, phys_addr, size,
> ram_addr_offset,
> >- lock, dma, is_write);
> >- mapcache_unlock(mapcache);
> >+ if (grant) {
> >+ /*
> >+ * Grants are only supported via address_space_map(). Anything
> >+ * else is considered a user/guest error.
> >+ *
> >+ * QEMU generally doesn't expect these mappings to ever fail, so
> >+ * if this happens we report an error message and abort().
> >+ */
> >+ if (!lock) {
>
> Nested if conditions that can be flattened, i.e. this could be
>
> if (grant && !lock)
>
Sounds good, will flatten this in the next version.
>
> >+ error_report("Trying access a grant reference without
> mapping it.");
>
> s/Trying access a grant/Tried to access a grant/
>
>
Will fix it, thanks!
Best regards,
Edgar
> >+ abort();
> >+ }
> >+ }
> >+
> >+ mapcache_lock(mc);
> >+ p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset,
> >+ lock, dma, grant, is_write);
> >+ mapcache_unlock(mc);
> > return p;
> > }
> >
> >@@ -449,7 +525,14 @@ static ram_addr_t
> xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
> >
> > ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> > {
> >- return xen_ram_addr_from_mapcache_single(mapcache, ptr);
> >+ ram_addr_t addr;
> >+
> >+ addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
> >+ if (addr == RAM_ADDR_INVALID) {
> >+ addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
> >+ }
> >+
> >+ return addr;
> > }
> >
> > static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
> >@@ -460,6 +543,7 @@ static void
> xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
> > hwaddr paddr_index;
> > hwaddr size;
> > int found = 0;
> >+ int rc;
> >
> > QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
> > if (reventry->vaddr_req == buffer) {
> >@@ -502,7 +586,14 @@ static void
> xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
> > }
> >
> > ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
> >- if (munmap(entry->vaddr_base, entry->size) != 0) {
> >+ if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) {
> >+ rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base,
> >+ entry->size >> mc->bucket_shift);
> >+ } else {
> >+ rc = munmap(entry->vaddr_base, entry->size);
> >+ }
> >+
> >+ if (rc) {
> > perror("unmap fails");
> > exit(-1);
> > }
> >@@ -521,14 +612,24 @@ typedef struct XenMapCacheData {
> > uint8_t *buffer;
> > } XenMapCacheData;
> >
> >+static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t
> *buffer)
> >+{
> >+ mapcache_lock(mc);
> >+ xen_invalidate_map_cache_entry_unlocked(mc, buffer);
> >+ mapcache_unlock(mc);
> >+}
> >+
> >+static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
> >+{
> >+ xen_invalidate_map_cache_entry_single(mapcache, buffer);
> >+ xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
> >+}
> >+
> > static void xen_invalidate_map_cache_entry_bh(void *opaque)
> > {
> > XenMapCacheData *data = opaque;
> >
> >- mapcache_lock(mapcache);
> >- xen_invalidate_map_cache_entry_unlocked(mapcache, data->buffer);
> >- mapcache_unlock(mapcache);
> >-
> >+ xen_invalidate_map_cache_entry_all(data->buffer);
> > aio_co_wake(data->co);
> > }
> >
> >@@ -543,9 +644,7 @@ void coroutine_mixed_fn
> xen_invalidate_map_cache_entry(uint8_t *buffer)
> > xen_invalidate_map_cache_entry_bh,
> &data);
> > qemu_coroutine_yield();
> > } else {
> >- mapcache_lock(mapcache);
> >- xen_invalidate_map_cache_entry_unlocked(mapcache, buffer);
> >- mapcache_unlock(mapcache);
> >+ xen_invalidate_map_cache_entry_all(buffer);
> > }
> > }
> >
> >@@ -597,6 +696,7 @@ void xen_invalidate_map_cache(void)
> > bdrv_drain_all();
> >
> > xen_invalidate_map_cache_single(mapcache);
> >+ xen_invalidate_map_cache_single(mapcache_grants);
> > }
> >
> > static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
> >@@ -632,13 +732,16 @@ static uint8_t
> *xen_replace_cache_entry_unlocked(MapCache *mc,
> > return NULL;
> > }
> >
> >+ assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);
> >+
> > address_index = new_phys_addr >> mc->bucket_shift;
> > address_offset = new_phys_addr & (mc->bucket_size - 1);
> >
> > trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
> >
> > xen_remap_bucket(mc, entry, entry->vaddr_base,
> >- cache_size, address_index, false, old_phys_addr);
> >+ cache_size, address_index, false,
> >+ false, false, old_phys_addr);
> > if (!test_bits(address_offset >> XC_PAGE_SHIFT,
> > test_bit_size >> XC_PAGE_SHIFT,
> > entry->valid_mapping)) {
> >diff --git a/include/hw/xen/xen-hvm-common.h
> b/include/hw/xen/xen-hvm-common.h
> >index 65a51aac2e..3d796235dc 100644
> >--- a/include/hw/xen/xen-hvm-common.h
> >+++ b/include/hw/xen/xen-hvm-common.h
> >@@ -16,6 +16,7 @@
> > #include <xen/hvm/ioreq.h>
> >
> > extern MemoryRegion xen_memory;
> >+extern MemoryRegion xen_grants;
> > extern MemoryListener xen_io_listener;
> > extern DeviceListener xen_device_listener;
> >
> >@@ -29,6 +30,8 @@ extern DeviceListener xen_device_listener;
> > do { } while (0)
> > #endif
> >
> >+#define XEN_GRANT_ADDR_OFF (1ULL << 63)
> >+
> > static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int
> i)
> > {
> > return shared_page->vcpu_ioreq[i].vp_eport;
> >diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
> >index dc72f83bcb..19dccf4d71 100644
> >--- a/include/sysemu/xen.h
> >+++ b/include/sysemu/xen.h
> >@@ -35,6 +35,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
> > struct MemoryRegion *mr, Error **errp);
> >
> > bool xen_mr_is_memory(MemoryRegion *mr);
> >+bool xen_mr_is_grants(MemoryRegion *mr);
> >
> > #else /* !CONFIG_XEN_IS_POSSIBLE */
> >
> >@@ -55,6 +56,12 @@ static inline bool xen_mr_is_memory(MemoryRegion *mr)
> > return false;
> > }
> >
> >+static inline bool xen_mr_is_grants(MemoryRegion *mr)
> >+{
> >+ g_assert_not_reached();
> >+ return false;
> >+}
> >+
> > #endif /* CONFIG_XEN_IS_POSSIBLE */
> >
> > #endif
> >--
> >2.40.1
> >
> >
>
[-- Attachment #2: Type: text/html, Size: 23186 bytes --]
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v6 7/8] xen: mapcache: Add support for grant mappings
2024-05-23 10:23 ` Edgar E. Iglesias
@ 2024-05-23 18:30 ` Stefano Stabellini
0 siblings, 0 replies; 17+ messages in thread
From: Stefano Stabellini @ 2024-05-23 18:30 UTC (permalink / raw)
To: Edgar E. Iglesias
Cc: Manos Pitsidianakis, xen-devel, qemu-devel, sstabellini, jgross,
Edgar E. Iglesias, Anthony PERARD, Paul Durrant
[-- Attachment #1: Type: text/plain, Size: 14315 bytes --]
On Thu, 23 May 2024, Edgar E. Iglesias wrote:
> On Thu, May 23, 2024 at 9:47 AM Manos Pitsidianakis <manos.pitsidianakis@linaro.org> wrote:
> On Thu, 16 May 2024 18:48, "Edgar E. Iglesias" <edgar.iglesias@gmail.com> wrote:
> >From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> >
> >Add a second mapcache for grant mappings. The mapcache for
> >grants needs to work with XC_PAGE_SIZE granularity since
> >we can't map larger ranges than what has been granted to us.
> >
> >Like with foreign mappings (xen_memory), machines using grants
> >are expected to initialize the xen_grants MR and map it
> >into their address-map accordingly.
> >
> >Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> >Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> >---
> > hw/xen/xen-hvm-common.c | 12 ++-
> > hw/xen/xen-mapcache.c | 163 ++++++++++++++++++++++++++------
> > include/hw/xen/xen-hvm-common.h | 3 +
> > include/sysemu/xen.h | 7 ++
> > 4 files changed, 152 insertions(+), 33 deletions(-)
> >
> >diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
> >index a0a0252da0..b8ace1c368 100644
> >--- a/hw/xen/xen-hvm-common.c
> >+++ b/hw/xen/xen-hvm-common.c
> >@@ -10,12 +10,18 @@
> > #include "hw/boards.h"
> > #include "hw/xen/arch_hvm.h"
> >
> >-MemoryRegion xen_memory;
> >+MemoryRegion xen_memory, xen_grants;
> >
> >-/* Check for xen memory. */
> >+/* Check for any kind of xen memory, foreign mappings or grants. */
> > bool xen_mr_is_memory(MemoryRegion *mr)
> > {
> >- return mr == &xen_memory;
> >+ return mr == &xen_memory || mr == &xen_grants;
> >+}
> >+
> >+/* Check specifically for grants. */
> >+bool xen_mr_is_grants(MemoryRegion *mr)
> >+{
> >+ return mr == &xen_grants;
> > }
> >
> > void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
> >diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
> >index a07c47b0b1..1cbc2aeaa9 100644
> >--- a/hw/xen/xen-mapcache.c
> >+++ b/hw/xen/xen-mapcache.c
> >@@ -14,6 +14,7 @@
> >
> > #include <sys/resource.h>
> >
> >+#include "hw/xen/xen-hvm-common.h"
> > #include "hw/xen/xen_native.h"
> > #include "qemu/bitmap.h"
> >
> >@@ -21,6 +22,8 @@
> > #include "sysemu/xen-mapcache.h"
> > #include "trace.h"
> >
> >+#include <xenevtchn.h>
> >+#include <xengnttab.h>
> >
> > #if HOST_LONG_BITS == 32
> > # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
> >@@ -41,6 +44,7 @@ typedef struct MapCacheEntry {
> > unsigned long *valid_mapping;
> > uint32_t lock;
> > #define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
> >+#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
>
> Might we get more entry kinds in the future? (for example foreign maps).
> Maybe this could be an enum.
>
>
> Perhaps. Foreign mappings are already supported, this flag separates ordinary foreign mappings from grant foreign mappings.
> IMO, since this is not an external interface it's probably better to change it once we have a concrete use-case at hand.
>
>
> > uint8_t flags;
> > hwaddr size;
> > struct MapCacheEntry *next;
> >@@ -71,6 +75,8 @@ typedef struct MapCache {
> > } MapCache;
> >
> > static MapCache *mapcache;
> >+static MapCache *mapcache_grants;
> >+static xengnttab_handle *xen_region_gnttabdev;
> >
> > static inline void mapcache_lock(MapCache *mc)
> > {
> >@@ -131,6 +137,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> > unsigned long max_mcache_size;
> > unsigned int bucket_shift;
> >
> >+ xen_region_gnttabdev = xengnttab_open(NULL, 0);
> >+ if (xen_region_gnttabdev == NULL) {
> >+ error_report("mapcache: Failed to open gnttab device");
> >+ exit(EXIT_FAILURE);
> >+ }
> >+
> > if (HOST_LONG_BITS == 32) {
> > bucket_shift = 16;
> > } else {
> >@@ -159,6 +171,15 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> > mapcache = xen_map_cache_init_single(f, opaque,
> > bucket_shift,
> > max_mcache_size);
> >+
> >+ /*
> >+ * Grant mappings must use XC_PAGE_SIZE granularity since we can't
> >+ * map anything beyond the number of pages granted to us.
> >+ */
> >+ mapcache_grants = xen_map_cache_init_single(f, opaque,
> >+ XC_PAGE_SHIFT,
> >+ max_mcache_size);
> >+
> > setrlimit(RLIMIT_AS, &rlimit_as);
> > }
> >
> >@@ -168,17 +189,24 @@ static void xen_remap_bucket(MapCache *mc,
> > hwaddr size,
> > hwaddr address_index,
> > bool dummy,
> >+ bool grant,
> >+ bool is_write,
> > ram_addr_t ram_offset)
> > {
> > uint8_t *vaddr_base;
> >- xen_pfn_t *pfns;
> >+ uint32_t *refs = NULL;
> >+ xen_pfn_t *pfns = NULL;
> > int *err;
>
> You should use g_autofree to perform automatic cleanup on function exit
> instead of manually freeing, since the allocations should only live
> within the function call.
>
>
> Sounds good, I'll do that in the next version.
>
>
> > unsigned int i;
> > hwaddr nb_pfn = size >> XC_PAGE_SHIFT;
> >
> > trace_xen_remap_bucket(address_index);
> >
> >- pfns = g_new0(xen_pfn_t, nb_pfn);
> >+ if (grant) {
> >+ refs = g_new0(uint32_t, nb_pfn);
> >+ } else {
> >+ pfns = g_new0(xen_pfn_t, nb_pfn);
> >+ }
> > err = g_new0(int, nb_pfn);
> >
> > if (entry->vaddr_base != NULL) {
> >@@ -207,21 +235,51 @@ static void xen_remap_bucket(MapCache *mc,
> > g_free(entry->valid_mapping);
> > entry->valid_mapping = NULL;
> >
> >- for (i = 0; i < nb_pfn; i++) {
> >- pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
> >+ if (grant) {
> >+ hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);
> >+
> >+ for (i = 0; i < nb_pfn; i++) {
> >+ refs[i] = grant_base + i;
> >+ }
> >+ } else {
> >+ for (i = 0; i < nb_pfn; i++) {
> >+ pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
> >+ }
> > }
> >
> >- /*
> >- * If the caller has requested the mapping at a specific address use
> >- * MAP_FIXED to make sure it's honored.
> >- */
> >+ entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;
> >+
> > if (!dummy) {
> >- vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
> >- PROT_READ | PROT_WRITE,
> >- vaddr ? MAP_FIXED : 0,
> >- nb_pfn, pfns, err);
>
> Since err is not NULL here, the function might return a valid pointer
> but individual frames might have failed.
>
>
> Yes but AFAICT, the case when some pages fail foreign mapping is handled further down the function (see the valid_mappings bitmap).
> Note that this series isn't really changing this existing behaviour for foreign mappings. In any case, if we spot a bug in existing code,
> I'm happy to fix it.
>
>
>
> >+ if (grant) {
> >+ int prot = PROT_READ;
> >+
> >+ if (is_write) {
> >+ prot |= PROT_WRITE;
> >+ }
> >+
> >+ entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
> >+ assert(vaddr == NULL);
> >+ vaddr_base = xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
> >+ nb_pfn,
> >+ xen_domid, refs,
> >+ prot);
> >+ } else {
> >+ /*
> >+ * If the caller has requested the mapping at a specific address use
> >+ * MAP_FIXED to make sure it's honored.
> >+ *
> >+ * We don't yet support upgrading mappings from RO to RW, to handle
> >+ * models using ordinary address_space_rw(), foreign mappings ignore
> >+ * is_write and are always mapped RW.
> >+ */
> >+ vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
> >+ PROT_READ | PROT_WRITE,
> >+ vaddr ? MAP_FIXED : 0,
> >+ nb_pfn, pfns, err);
> >+ }
> > if (vaddr_base == NULL) {
> >- perror("xenforeignmemory_map2");
> >+ perror(grant ? "xengnttab_map_domain_grant_refs"
> >+ : "xenforeignmemory_map2");
> > exit(-1);
> > }
> > } else {
> >@@ -261,6 +319,7 @@ static void xen_remap_bucket(MapCache *mc,
> > }
> > }
> >
> >+ g_free(refs);
> > g_free(pfns);
> > g_free(err);
> > }
> >@@ -268,7 +327,8 @@ static void xen_remap_bucket(MapCache *mc,
> > static uint8_t *xen_map_cache_unlocked(MapCache *mc,
> > hwaddr phys_addr, hwaddr size,
> > ram_addr_t ram_offset,
> >- uint8_t lock, bool dma, bool is_write)
> >+ uint8_t lock, bool dma,
> >+ bool grant, bool is_write)
> > {
> > MapCacheEntry *entry, *pentry = NULL,
> > *free_entry = NULL, *free_pentry = NULL;
> >@@ -340,7 +400,7 @@ tryagain:
> > entry = g_new0(MapCacheEntry, 1);
> > pentry->next = entry;
> > xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
> >- ram_offset);
> >+ grant, is_write, ram_offset);
> > } else if (!entry->lock) {
> > if (!entry->vaddr_base || entry->paddr_index != address_index ||
> > entry->size != cache_size ||
> >@@ -348,7 +408,7 @@ tryagain:
> > test_bit_size >> XC_PAGE_SHIFT,
> > entry->valid_mapping)) {
> > xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
> >- ram_offset);
> >+ grant, is_write, ram_offset);
> > }
> > }
> >
> >@@ -399,12 +459,28 @@ uint8_t *xen_map_cache(MemoryRegion *mr,
> > uint8_t lock, bool dma,
> > bool is_write)
> > {
> >+ bool grant = xen_mr_is_grants(mr);
> >+ MapCache *mc = grant ? mapcache_grants : mapcache;
> > uint8_t *p;
> >
> >- mapcache_lock(mapcache);
> >- p = xen_map_cache_unlocked(mapcache, phys_addr, size, ram_addr_offset,
> >- lock, dma, is_write);
> >- mapcache_unlock(mapcache);
> >+ if (grant) {
> >+ /*
> >+ * Grants are only supported via address_space_map(). Anything
> >+ * else is considered a user/guest error.
> >+ *
> >+ * QEMU generally doesn't expect these mappings to ever fail, so
> >+ * if this happens we report an error message and abort().
> >+ */
> >+ if (!lock) {
>
> Nested if conditions that can be flattened, i.e. this could be
>
> if (grant && !lock)
>
>
>
> Sounds good, will flatten this in the next version.
>
>
> >+ error_report("Trying access a grant reference without mapping it.");
>
> s/Trying access a grant/Tried to access a grant/
>
>
> Will fix it, thanks!
Please retain my ack
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v6 8/8] hw/arm: xen: Enable use of grant mappings
2024-05-16 15:47 [PATCH v6 0/8] xen: Support grant mappings Edgar E. Iglesias
` (6 preceding siblings ...)
2024-05-16 15:48 ` [PATCH v6 7/8] xen: mapcache: Add support for grant mappings Edgar E. Iglesias
@ 2024-05-16 15:48 ` Edgar E. Iglesias
2024-05-23 7:48 ` Manos Pitsidianakis
7 siblings, 1 reply; 17+ messages in thread
From: Edgar E. Iglesias @ 2024-05-16 15:48 UTC (permalink / raw)
To: qemu-devel
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Peter Maydell, qemu-arm
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
hw/arm/xen_arm.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
index 15fa7dfa84..6fad829ede 100644
--- a/hw/arm/xen_arm.c
+++ b/hw/arm/xen_arm.c
@@ -125,6 +125,11 @@ static void xen_init_ram(MachineState *machine)
GUEST_RAM1_BASE, ram_size[1]);
memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi);
}
+
+ /* Setup support for grants. */
+ memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len,
+ &error_fatal);
+ memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants);
}
void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
--
2.40.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* Re: [PATCH v6 8/8] hw/arm: xen: Enable use of grant mappings
2024-05-16 15:48 ` [PATCH v6 8/8] hw/arm: xen: Enable use of " Edgar E. Iglesias
@ 2024-05-23 7:48 ` Manos Pitsidianakis
0 siblings, 0 replies; 17+ messages in thread
From: Manos Pitsidianakis @ 2024-05-23 7:48 UTC (permalink / raw)
To: qemu-devel, Edgar E. Iglesias
Cc: edgar.iglesias, sstabellini, jgross, Edgar E. Iglesias,
Peter Maydell, qemu-arm
On Thu, 16 May 2024 18:48, "Edgar E. Iglesias" <edgar.iglesias@gmail.com> wrote:
>From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
>Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
>Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
>---
> hw/arm/xen_arm.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
>diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
>index 15fa7dfa84..6fad829ede 100644
>--- a/hw/arm/xen_arm.c
>+++ b/hw/arm/xen_arm.c
>@@ -125,6 +125,11 @@ static void xen_init_ram(MachineState *machine)
> GUEST_RAM1_BASE, ram_size[1]);
> memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi);
> }
>+
>+ /* Setup support for grants. */
>+ memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len,
>+ &error_fatal);
>+ memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants);
> }
>
> void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
>--
>2.40.1
>
>
Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
^ permalink raw reply [flat|nested] 17+ messages in thread