* [PATCH v2 1/3] xen/arm: io: Extend write/read handler to pass private data
2015-09-29 14:44 [PATCH v2 0/3] xen/arm vgic-v3: Correctly retrieved the vCPU associated to a re-distributor Julien Grall
@ 2015-09-29 14:44 ` Julien Grall
2015-10-01 12:40 ` Ian Campbell
2015-09-29 14:44 ` [PATCH v2 2/3] xen/arm: vgic-v3: Correctly retrieve the vCPU associated to a re-distributor Julien Grall
` (2 subsequent siblings)
3 siblings, 1 reply; 7+ messages in thread
From: Julien Grall @ 2015-09-29 14:44 UTC (permalink / raw)
To: xen-devel
Cc: Julien Grall, stefano.stabellini, ian.campbell,
shameerali.kolothum.thodi
Some handlers may require private data in order to quickly retrieve
information related to the emulated region.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
---
Cc: shameerali.kolothum.thodi@huawei.com
This will be necessary in the follow-up patch in order to fix a bug in the
GICR emulation.
Changes in v2:
- Use a break rather than a goto
---
xen/arch/arm/io.c | 21 +++++++++++++--------
xen/arch/arm/vgic-v2.c | 8 +++++---
xen/arch/arm/vgic-v3.c | 17 +++++++++++------
xen/arch/arm/vuart.c | 11 ++++++-----
xen/include/asm-arm/mmio.h | 7 ++++---
5 files changed, 39 insertions(+), 25 deletions(-)
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 8e55d49..b8f4a18 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -27,7 +27,7 @@ int handle_mmio(mmio_info_t *info)
{
struct vcpu *v = current;
int i;
- const struct mmio_handler *mmio_handler;
+ const struct mmio_handler *mmio_handler = NULL;
const struct io_handler *io_handlers = &v->domain->arch.io_handlers;
for ( i = 0; i < io_handlers->num_entries; i++ )
@@ -36,19 +36,23 @@ int handle_mmio(mmio_info_t *info)
if ( (info->gpa >= mmio_handler->addr) &&
(info->gpa < (mmio_handler->addr + mmio_handler->size)) )
- {
- return info->dabt.write ?
- mmio_handler->mmio_handler_ops->write_handler(v, info) :
- mmio_handler->mmio_handler_ops->read_handler(v, info);
- }
+ break;
}
- return 0;
+ if ( i == io_handlers->num_entries )
+ return 0;
+
+ if ( info->dabt.write )
+ return mmio_handler->mmio_handler_ops->write_handler(v, info,
+ mmio_handler->priv);
+ else
+ return mmio_handler->mmio_handler_ops->read_handler(v, info,
+ mmio_handler->priv);
}
void register_mmio_handler(struct domain *d,
const struct mmio_handler_ops *handle,
- paddr_t addr, paddr_t size)
+ paddr_t addr, paddr_t size, void *priv)
{
struct io_handler *handler = &d->arch.io_handlers;
@@ -59,6 +63,7 @@ void register_mmio_handler(struct domain *d,
handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
handler->mmio_handlers[handler->num_entries].addr = addr;
handler->mmio_handlers[handler->num_entries].size = size;
+ handler->mmio_handlers[handler->num_entries].priv = priv;
dsb(ish);
handler->num_entries++;
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index fa71598..8e50f22 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -50,7 +50,8 @@ void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t vbase)
vgic_v2_hw.vbase = vbase;
}
-static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
+static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
struct hsr_dabt dabt = info->dabt;
struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -247,7 +248,8 @@ static int vgic_v2_to_sgi(struct vcpu *v, register_t sgir)
return vgic_to_sgi(v, sgir, sgi_mode, virq, &target);
}
-static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
+static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
struct hsr_dabt dabt = info->dabt;
struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -590,7 +592,7 @@ static int vgic_v2_domain_init(struct domain *d)
sizeof(d->arch.vgic.shared_irqs[i].v2.itargets));
register_mmio_handler(d, &vgic_v2_distr_mmio_handler, d->arch.vgic.dbase,
- PAGE_SIZE);
+ PAGE_SIZE, NULL);
return 0;
}
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index f1c482d..6a4feb2 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -634,7 +634,8 @@ static inline struct vcpu *get_vcpu_from_rdist(paddr_t gpa,
return d->vcpu[vcpu_id];
}
-static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info)
+static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
uint32_t offset;
@@ -656,7 +657,8 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info)
return 0;
}
-static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info)
+static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
uint32_t offset;
@@ -678,7 +680,8 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info)
return 0;
}
-static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
+static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
struct hsr_dabt dabt = info->dabt;
struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -835,7 +838,8 @@ read_as_zero:
return 1;
}
-static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
+static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
+ void *priv)
{
struct hsr_dabt dabt = info->dabt;
struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -1200,7 +1204,7 @@ static int vgic_v3_domain_init(struct domain *d)
/* Register mmio handle for the Distributor */
register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
- SZ_64K);
+ SZ_64K, NULL);
/*
* Register mmio handler per contiguous region occupied by the
@@ -1210,7 +1214,8 @@ static int vgic_v3_domain_init(struct domain *d)
for ( i = 0; i < d->arch.vgic.nr_regions; i++ )
register_mmio_handler(d, &vgic_rdistr_mmio_handler,
d->arch.vgic.rdist_regions[i].base,
- d->arch.vgic.rdist_regions[i].size);
+ d->arch.vgic.rdist_regions[i].size,
+ NULL);
d->arch.vgic.ctlr = VGICD_CTLR_DEFAULT;
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index d9f4249..51d0557 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -45,8 +45,8 @@
#define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
-static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info);
-static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info);
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, void *priv);
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, void *priv);
static const struct mmio_handler_ops vuart_mmio_handler = {
.read_handler = vuart_mmio_read,
@@ -70,7 +70,8 @@ int domain_vuart_init(struct domain *d)
register_mmio_handler(d, &vuart_mmio_handler,
d->arch.vuart.info->base_addr,
- d->arch.vuart.info->size);
+ d->arch.vuart.info->size,
+ NULL);
return 0;
}
@@ -105,7 +106,7 @@ static void vuart_print_char(struct vcpu *v, char c)
spin_unlock(&uart->lock);
}
-static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info)
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, void *priv)
{
struct domain *d = v->domain;
struct hsr_dabt dabt = info->dabt;
@@ -125,7 +126,7 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info)
return 1;
}
-static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info)
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, void *priv)
{
struct domain *d = v->domain;
struct hsr_dabt dabt = info->dabt;
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index 0160f09..294c18b 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -32,8 +32,8 @@ typedef struct
paddr_t gpa;
} mmio_info_t;
-typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info);
-typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info);
+typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info, void *priv);
+typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info, void *priv);
typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
struct mmio_handler_ops {
@@ -45,6 +45,7 @@ struct mmio_handler {
paddr_t addr;
paddr_t size;
const struct mmio_handler_ops *mmio_handler_ops;
+ void *priv;
};
struct io_handler {
@@ -56,7 +57,7 @@ struct io_handler {
extern int handle_mmio(mmio_info_t *info);
void register_mmio_handler(struct domain *d,
const struct mmio_handler_ops *handle,
- paddr_t addr, paddr_t size);
+ paddr_t addr, paddr_t size, void *priv);
int domain_io_init(struct domain *d);
#endif /* __ASM_ARM_MMIO_H__ */
--
2.1.4
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2 2/3] xen/arm: vgic-v3: Correctly retrieve the vCPU associated to a re-distributor
2015-09-29 14:44 [PATCH v2 0/3] xen/arm vgic-v3: Correctly retrieved the vCPU associated to a re-distributor Julien Grall
2015-09-29 14:44 ` [PATCH v2 1/3] xen/arm: io: Extend write/read handler to pass private data Julien Grall
@ 2015-09-29 14:44 ` Julien Grall
2015-09-29 14:44 ` [PATCH v2 3/3] xen/arm: io: Shorten the name of the fields and clean up Julien Grall
2015-10-01 12:56 ` [PATCH v2 0/3] xen/arm vgic-v3: Correctly retrieved the vCPU associated to a re-distributor Ian Campbell
3 siblings, 0 replies; 7+ messages in thread
From: Julien Grall @ 2015-09-29 14:44 UTC (permalink / raw)
To: xen-devel
Cc: Julien Grall, stefano.stabellini, ian.campbell,
shameerali.kolothum.thodi
When the guest is accessing the re-distributor, Xen retrieves the base
of the re-distributor using a mask based on the stride.
When the stride has multiple bits set, the corresponding mask will be
computed incorrectly [1], therefore giving an invalid vCPU and offset:
(XEN) d0v0: vGICR: unknown gpa read address 000000008d130008
(XEN) traps.c:2447:d0v1 HSR=0x93c08006 pc=0xffffffc00032362c
gva=0xffffff80000b0008 gpa=0x0000008d130008
For instance, if the re-distributor region starts at 0x8d100000
and the stride is 0x30000, an access to the address 0x8d130008 should
be valid and use the re-distributor of vCPU1 with an offset of 0x8.
However, Xen returns vCPU0 and an offset of 0x20008.
I didn't find a way to replace the current computation of the mask with
a valid one. The only solution I have found is to pass the region in
private data of the handler. So we can directly get the offset from the
beginning of the region and find the corresponding vCPU/offset in the
re-distributor.
This also makes the code simpler and avoids the fast/slow path distinction.
[1] http://lists.xen.org/archives/html/xen-devel/2015-09/msg03372.html
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
Cc: shameerali.kolothum.thodi@huawei.com
This is technically a good candidate for Xen 4.6. Without it, DOM0
may not be able to bring up secondary CPUs on platforms using a stride
with multiple bits set [1]. However, we don't support such platforms
right now, so I would rather defer this change to Xen 4.6.1 and
avoid a possible downside/bug in this patch.
It has been tested by Shameerali, who hit the problem. I will be
happy to add his Tested-by if he wants.
[1] http://lists.xen.org/archives/html/xen-devel/2015-09/msg03372.html
Changes in v2:
- Typoes in the commit message
- Add Ian's acked-by
---
xen/arch/arm/vgic-v3.c | 58 +++++++++++++++++---------------------------------
1 file changed, 20 insertions(+), 38 deletions(-)
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 6a4feb2..0a14184 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -593,55 +593,34 @@ write_ignore_32:
return 1;
}
-static inline struct vcpu *get_vcpu_from_rdist(paddr_t gpa,
- struct vcpu *v,
- uint32_t *offset)
+static struct vcpu *get_vcpu_from_rdist(struct domain *d,
+ const struct vgic_rdist_region *region,
+ paddr_t gpa, uint32_t *offset)
{
- struct domain *d = v->domain;
+ struct vcpu *v;
uint32_t stride = d->arch.vgic.rdist_stride;
- paddr_t base;
- int i, vcpu_id;
- struct vgic_rdist_region *region;
-
- *offset = gpa & (stride - 1);
- base = gpa & ~((paddr_t)stride - 1);
-
- /* Fast path: the VCPU is trying to access its re-distributor */
- if ( likely(v->arch.vgic.rdist_base == base) )
- return v;
-
- /* Slow path: the VCPU is trying to access another re-distributor */
-
- /*
- * Find the region where the re-distributor lives. For this purpose,
- * we look one region ahead as only MMIO range for redistributors
- * traps here.
- * Note: The region has been ordered during the GIC initialization
- */
- for ( i = 1; i < d->arch.vgic.nr_regions; i++ )
- {
- if ( base < d->arch.vgic.rdist_regions[i].base )
- break;
- }
-
- region = &d->arch.vgic.rdist_regions[i - 1];
-
- vcpu_id = region->first_cpu + ((base - region->base) / stride);
+ unsigned int vcpu_id;
+ vcpu_id = region->first_cpu + ((gpa - region->base) / stride);
if ( unlikely(vcpu_id >= d->max_vcpus) )
return NULL;
- return d->vcpu[vcpu_id];
+ v = d->vcpu[vcpu_id];
+
+ *offset = gpa - v->arch.vgic.rdist_base;
+
+ return v;
}
static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info,
void *priv)
{
uint32_t offset;
+ const struct vgic_rdist_region *region = priv;
perfc_incr(vgicr_reads);
- v = get_vcpu_from_rdist(info->gpa, v, &offset);
+ v = get_vcpu_from_rdist(v->domain, region, info->gpa, &offset);
if ( unlikely(!v) )
return 0;
@@ -661,10 +640,11 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info,
void *priv)
{
uint32_t offset;
+ const struct vgic_rdist_region *region = priv;
perfc_incr(vgicr_writes);
- v = get_vcpu_from_rdist(info->gpa, v, &offset);
+ v = get_vcpu_from_rdist(v->domain, region, info->gpa, &offset);
if ( unlikely(!v) )
return 0;
@@ -1212,10 +1192,12 @@ static int vgic_v3_domain_init(struct domain *d)
* redistributor is targeted.
*/
for ( i = 0; i < d->arch.vgic.nr_regions; i++ )
+ {
+ struct vgic_rdist_region *region = &d->arch.vgic.rdist_regions[i];
+
register_mmio_handler(d, &vgic_rdistr_mmio_handler,
- d->arch.vgic.rdist_regions[i].base,
- d->arch.vgic.rdist_regions[i].size,
- NULL);
+ region->base, region->size, region);
+ }
d->arch.vgic.ctlr = VGICD_CTLR_DEFAULT;
--
2.1.4
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2 3/3] xen/arm: io: Shorten the name of the fields and clean up
2015-09-29 14:44 [PATCH v2 0/3] xen/arm vgic-v3: Correctly retrieved the vCPU associated to a re-distributor Julien Grall
2015-09-29 14:44 ` [PATCH v2 1/3] xen/arm: io: Extend write/read handler to pass private data Julien Grall
2015-09-29 14:44 ` [PATCH v2 2/3] xen/arm: vgic-v3: Correctly retrieve the vCPU associated to a re-distributor Julien Grall
@ 2015-09-29 14:44 ` Julien Grall
2015-10-01 12:41 ` Ian Campbell
2015-10-01 12:56 ` [PATCH v2 0/3] xen/arm vgic-v3: Correctly retrieved the vCPU associated to a re-distributor Ian Campbell
3 siblings, 1 reply; 7+ messages in thread
From: Julien Grall @ 2015-09-29 14:44 UTC (permalink / raw)
To: xen-devel; +Cc: Julien Grall, stefano.stabellini, ian.campbell
The field names in the IO emulation are really long and repeatedly use
the term "handler", which makes some lines cumbersome to read:
mmio_handler->mmio_handler_ops->write_handler
Also take the opportunity to do some clean up:
- Avoid "handler" vs "handle" in register_mmio_handler
- Use a local variable to initialize handler in
register_mmio_handler
- Add a comment explaining the dsb(ish) in register_mmio_handler
- Rename the structure io_handler to vmmio because the io_handler
in fact handles multiple handlers, and the name of the field was
io_handlers. Also rename the field io_handlers to vmmio
- Rename the field mmio_handler_ops to ops because it lives in the
structure mmio_handler, so there is no need to repeat it
- Rename the field mmio_handlers to handlers because we are in the
vmmio structure
- Make it clear that register_mmio_ops takes an ops and not a
handle
- Clean up local variable to help to understand the code
Signed-off-by: Julien Grall <julien.grall@citrix.com>
---
This is the last patch of the series, so as to make it easy to backport
the previous patches to Xen 4.6 if necessary.
Ian, I haven't kept your acked-by because I had to rebase after the
changes you requested on patch #1.
Changes in v2:
- Small rebase after changes in #1
- Typoes in the commit message
---
xen/arch/arm/io.c | 54 +++++++++++++++++++++++++-------------------
xen/arch/arm/vgic-v2.c | 4 ++--
xen/arch/arm/vgic-v3.c | 8 +++----
xen/arch/arm/vuart.c | 4 ++--
xen/include/asm-arm/domain.h | 3 ++-
xen/include/asm-arm/mmio.h | 12 +++++-----
6 files changed, 47 insertions(+), 38 deletions(-)
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index b8f4a18..b418173 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -27,53 +27,61 @@ int handle_mmio(mmio_info_t *info)
{
struct vcpu *v = current;
int i;
- const struct mmio_handler *mmio_handler = NULL;
- const struct io_handler *io_handlers = &v->domain->arch.io_handlers;
+ const struct mmio_handler *handler = NULL;
+ const struct vmmio *vmmio = &v->domain->arch.vmmio;
- for ( i = 0; i < io_handlers->num_entries; i++ )
+ for ( i = 0; i < vmmio->num_entries; i++ )
{
- mmio_handler = &io_handlers->mmio_handlers[i];
+ handler = &vmmio->handlers[i];
- if ( (info->gpa >= mmio_handler->addr) &&
- (info->gpa < (mmio_handler->addr + mmio_handler->size)) )
+ if ( (info->gpa >= handler->addr) &&
+ (info->gpa < (handler->addr + handler->size)) )
break;
}
- if ( i == io_handlers->num_entries )
+ if ( i == vmmio->num_entries )
return 0;
if ( info->dabt.write )
- return mmio_handler->mmio_handler_ops->write_handler(v, info,
- mmio_handler->priv);
+ return handler->ops->write(v, info, handler->priv);
else
- return mmio_handler->mmio_handler_ops->read_handler(v, info,
- mmio_handler->priv);
+ return handler->ops->read(v, info, handler->priv);
}
void register_mmio_handler(struct domain *d,
- const struct mmio_handler_ops *handle,
+ const struct mmio_handler_ops *ops,
paddr_t addr, paddr_t size, void *priv)
{
- struct io_handler *handler = &d->arch.io_handlers;
+ struct vmmio *vmmio = &d->arch.vmmio;
+ struct mmio_handler *handler;
- BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+ BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
- spin_lock(&handler->lock);
+ spin_lock(&vmmio->lock);
- handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
- handler->mmio_handlers[handler->num_entries].addr = addr;
- handler->mmio_handlers[handler->num_entries].size = size;
- handler->mmio_handlers[handler->num_entries].priv = priv;
+ handler = &vmmio->handlers[vmmio->num_entries];
+
+ handler->ops = ops;
+ handler->addr = addr;
+ handler->size = size;
+ handler->priv = priv;
+
+ /*
+ * handle_mmio is not using the lock to avoid contention.
+ * Make sure the other processors see the new handler before
+ * updating the number of entries
+ */
dsb(ish);
- handler->num_entries++;
- spin_unlock(&handler->lock);
+ vmmio->num_entries++;
+
+ spin_unlock(&vmmio->lock);
}
int domain_io_init(struct domain *d)
{
- spin_lock_init(&d->arch.io_handlers.lock);
- d->arch.io_handlers.num_entries = 0;
+ spin_lock_init(&d->arch.vmmio.lock);
+ d->arch.vmmio.num_entries = 0;
return 0;
}
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 8e50f22..f886724 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -495,8 +495,8 @@ write_ignore:
}
static const struct mmio_handler_ops vgic_v2_distr_mmio_handler = {
- .read_handler = vgic_v2_distr_mmio_read,
- .write_handler = vgic_v2_distr_mmio_write,
+ .read = vgic_v2_distr_mmio_read,
+ .write = vgic_v2_distr_mmio_write,
};
static struct vcpu *vgic_v2_get_target_vcpu(struct vcpu *v, unsigned int irq)
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 0a14184..beb3621 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1037,13 +1037,13 @@ static int vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
}
static const struct mmio_handler_ops vgic_rdistr_mmio_handler = {
- .read_handler = vgic_v3_rdistr_mmio_read,
- .write_handler = vgic_v3_rdistr_mmio_write,
+ .read = vgic_v3_rdistr_mmio_read,
+ .write = vgic_v3_rdistr_mmio_write,
};
static const struct mmio_handler_ops vgic_distr_mmio_handler = {
- .read_handler = vgic_v3_distr_mmio_read,
- .write_handler = vgic_v3_distr_mmio_write,
+ .read = vgic_v3_distr_mmio_read,
+ .write = vgic_v3_distr_mmio_write,
};
static int vgic_v3_get_irq_priority(struct vcpu *v, unsigned int irq)
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index 51d0557..2495e87 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -49,8 +49,8 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, void *priv);
static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, void *priv);
static const struct mmio_handler_ops vuart_mmio_handler = {
- .read_handler = vuart_mmio_read,
- .write_handler = vuart_mmio_write,
+ .read = vuart_mmio_read,
+ .write = vuart_mmio_write,
};
int domain_vuart_init(struct domain *d)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index c3f5a95..01859cc 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -55,7 +55,8 @@ struct arch_domain
struct hvm_domain hvm_domain;
xen_pfn_t *grant_table_gpfn;
- struct io_handler io_handlers;
+ struct vmmio vmmio;
+
/* Continuable domain_relinquish_resources(). */
enum {
RELMEM_not_started,
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index 294c18b..1cd7a7a 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -37,26 +37,26 @@ typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info, void *priv);
typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
struct mmio_handler_ops {
- mmio_read_t read_handler;
- mmio_write_t write_handler;
+ mmio_read_t read;
+ mmio_write_t write;
};
struct mmio_handler {
paddr_t addr;
paddr_t size;
- const struct mmio_handler_ops *mmio_handler_ops;
+ const struct mmio_handler_ops *ops;
void *priv;
};
-struct io_handler {
+struct vmmio {
int num_entries;
spinlock_t lock;
- struct mmio_handler mmio_handlers[MAX_IO_HANDLER];
+ struct mmio_handler handlers[MAX_IO_HANDLER];
};
extern int handle_mmio(mmio_info_t *info);
void register_mmio_handler(struct domain *d,
- const struct mmio_handler_ops *handle,
+ const struct mmio_handler_ops *ops,
paddr_t addr, paddr_t size, void *priv);
int domain_io_init(struct domain *d);
--
2.1.4
^ permalink raw reply related [flat|nested] 7+ messages in thread