* [PATCH 01/14] arm/arm64: KVM: rework MPIDR assignment and add accessors
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
The virtual MPIDR registers (containing topology information) for the
guest are currently mapped linearly to the vcpu_id. Improve this
mapping for arm64 by using three affinity levels, so that the number
of vCPUs is not artificially limited. Also add an accessor to allow
easier lookup of a vCPU with a given MPIDR later.
Use this new accessor in the PSCI emulation.
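As an illustration (a sketch mirroring the reset_mpidr() hunk below, not
extra patch code), the three-level mapping packs a linear vcpu_id into
the MPIDR affinity fields like this:

	/* Pack a linear vcpu_id into Aff0..Aff2 (4 + 8 + 8 bits). */
	static u64 vcpu_id_to_mpidr(unsigned int vcpu_id)
	{
		u64 mpidr;

		mpidr  = (u64)(vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
		mpidr |= (u64)((vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
		mpidr |= (u64)((vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);

		return (1ULL << 31) | mpidr;	/* bit 31 is RES1 in MPIDR_EL1 */
	}

So vcpu_id 17 (0x11), for example, ends up with Aff0 = 1 and Aff1 = 1.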
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/include/asm/kvm_emulate.h | 2 +-
arch/arm/include/asm/kvm_host.h | 2 ++
arch/arm/kvm/arm.c | 15 +++++++++++++++
arch/arm/kvm/psci.c | 15 ++++-----------
arch/arm64/include/asm/kvm_emulate.h | 3 ++-
arch/arm64/include/asm/kvm_host.h | 2 ++
arch/arm64/kvm/sys_regs.c | 11 +++++++++--
7 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 0fa90c9..6b528e4 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -159,7 +159,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cp15[c0_MPIDR];
+ return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d6d5227..8d30f05 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -236,4 +236,6 @@ int kvm_perf_teardown(void);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9b3957d..9ffe962 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1027,6 +1027,21 @@ static void check_kvm_target_cpu(void *ret)
*(int *)ret = kvm_target_cpu();
}
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+{
+ unsigned long c_mpidr;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ c_mpidr = kvm_vcpu_get_mpidr(vcpu);
+ if (c_mpidr == mpidr)
+ return vcpu;
+ }
+ return NULL;
+}
+
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf377..49f0992 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -21,6 +21,7 @@
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
+#include <asm/kvm_host.h>
/*
* This is an implementation of the Power State Coordination Interface
@@ -65,25 +66,17 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL, *tmp;
+ struct kvm_vcpu *vcpu = NULL;
wait_queue_head_t *wq;
unsigned long cpu_id;
unsigned long context_id;
- unsigned long mpidr;
phys_addr_t target_pc;
- int i;
- cpu_id = *vcpu_reg(source_vcpu, 1);
+ cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
- kvm_for_each_vcpu(i, tmp, kvm) {
- mpidr = kvm_vcpu_get_mpidr(tmp);
- if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
- vcpu = tmp;
- break;
- }
- }
+ vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
/*
* Make sure the caller requested a valid CPU and that the CPU is
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc..685ea1b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -27,6 +27,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
+#include <asm/cputype.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -179,7 +180,7 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
{
- return vcpu_sys_reg(vcpu, MPIDR_EL1);
+ return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4ae9213..4c84250 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -228,4 +228,6 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
}
}
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c59a1bd..fa2273a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -192,10 +192,17 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
+ u64 mpidr;
+
/*
- * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+ * Map the vcpu_id into the first three Aff fields of the MPIDR.
* Aff0 is limited to 16 CPUs, due to an SGI injection
* limitation of GICv3.
*/
- vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+ mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+ vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
/*
--
1.7.9.5
* [PATCH 02/14] arm/arm64: KVM: pass down user space provided GIC type into vGIC code
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
With the introduction of a second emulated GIC model we need to let
userspace specify the GIC model to use for each VM. Pass the
userspace-provided value down into the vGIC code so that we can
differentiate between the models later.
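For context, a minimal sketch of the userspace side (not part of this
patch; vm_fd is a hypothetical, already-created VM file descriptor):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,	/* GIC model for this VM */
		.flags = 0,
	};
	/* on success, KVM stores the new device fd in cd.fd */
	int ret = ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);

The type field of this ioctl is what ends up as the new parameter of
kvm_vgic_create() via vgic_create().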
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/kvm/arm.c | 2 +-
include/kvm/arm_vgic.h | 4 ++--
virt/kvm/arm/vgic.c | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9ffe962..fa37fa1 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -804,7 +804,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
if (vgic_present)
- return kvm_vgic_create(kvm);
+ return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
else
return -ENXIO;
}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f5788cf..4feac9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -231,7 +231,7 @@ struct kvm_exit_mmio;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_init(struct kvm *kvm);
-int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
@@ -282,7 +282,7 @@ static inline int kvm_vgic_init(struct kvm *kvm)
return 0;
}
-static inline int kvm_vgic_create(struct kvm *kvm)
+static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
return 0;
}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 68ac9c6..8f1daf2 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1832,7 +1832,7 @@ out:
return ret;
}
-int kvm_vgic_create(struct kvm *kvm)
+int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret = 0;
struct kvm_vcpu *vcpu;
@@ -2284,7 +2284,7 @@ static void vgic_destroy(struct kvm_device *dev)
static int vgic_create(struct kvm_device *dev, u32 type)
{
- return kvm_vgic_create(dev->kvm);
+ return kvm_vgic_create(dev->kvm, type);
}
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
--
1.7.9.5
* [PATCH 03/14] arm/arm64: KVM: refactor vgic_handle_mmio() function
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
Currently we only need to deal with one MMIO region for the GIC
emulation, but we soon need to extend this. Refactor the existing
code to allow easier addition of different ranges without code
duplication.
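For reference, a sketch of the range-dispatch structure this
refactoring builds on (matching the fields the handlers below use; the
actual definition lives in vgic.c):

	struct mmio_range {
		phys_addr_t base;	/* offset of this block in the region */
		unsigned long len;
		bool (*handle_mmio)(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset);
	};

find_matching_range() walks an array of these entries, so adding a new
region is just a matter of passing a different table and base address
to vgic_handle_mmio_range().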
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic.c | 72 ++++++++++++++++++++++++++++++++++++---------------
1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8f1daf2..4c6b212 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -905,37 +905,28 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
return true;
}
-/**
- * vgic_handle_mmio - handle an in-kernel MMIO access
+/*
+ * vgic_handle_mmio_range - handle an in-kernel MMIO access
* @vcpu: pointer to the vcpu performing the access
* @run: pointer to the kvm_run structure
* @mmio: pointer to the data describing the access
+ * @ranges: pointer to the register defining structure
+ * @mmio_base: base address for this mapping
*
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
+ * returns true if the MMIO access could be performed
*/
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
+static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct mmio_range *ranges,
+ unsigned long mmio_base)
{
const struct mmio_range *range;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long base = dist->vgic_dist_base;
bool updated_state;
unsigned long offset;
- if (!irqchip_in_kernel(vcpu->kvm) ||
- mmio->phys_addr < base ||
- (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- offset = mmio->phys_addr - base;
- range = find_matching_range(vgic_dist_ranges, mmio, offset);
+ offset = mmio->phys_addr - mmio_base;
+ range = find_matching_range(ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -943,7 +934,7 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
spin_lock(&vcpu->kvm->arch.vgic.lock);
- offset = mmio->phys_addr - range->base - base;
+ offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
updated_state = range->handle_mmio(vcpu, mmio, offset);
} else {
@@ -961,6 +952,45 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
+#define IS_IN_RANGE(addr, alen, base, len) \
+ (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
+
+static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
+
+ if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
+ KVM_VGIC_V2_DIST_SIZE))
+ return false;
+
+ /* GICv2 does not support accesses wider than 32 bits */
+ if (mmio->len > 4) {
+ kvm_inject_dabt(vcpu, mmio->phys_addr);
+ return true;
+ }
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+}
+
+/**
+ * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
+ * @vcpu: pointer to the vcpu performing the access
+ * @run: pointer to the kvm_run structure
+ * @mmio: pointer to the data describing the access
+ *
+ * returns true if the MMIO access has been performed in kernel space,
+ * and false if it needs to be emulated in user space.
+ */
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return false;
+
+ return vgic_v2_handle_mmio(vcpu, run, mmio);
+}
+
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
--
1.7.9.5
* [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
Some GICv3 registers can and will be accessed as 64 bit registers.
Currently the register handling code can only deal with 32 bit
accesses, so we do two consecutive calls to cover this.
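As a worked example (little-endian host assumed), an 8-byte value is
forwarded as two 4-byte handler calls:

	u64 val = 0x0000000200000001ULL;	/* example 64-bit write */
	u32 lo = (u32)val;		/* 0x00000001, handled at offset */
	u32 hi = (u32)(val >> 32);	/* 0x00000002, handled at offset + 4 */

call_range_handler() below does exactly this by aliasing mmio->data as
a u32 array, issuing the upper-word call first.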
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic.c | 48 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 4c6b212..b3cf4c7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -906,6 +906,48 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
}
/*
+ * Call the respective handler function for the given range.
+ * We split up any 64 bit accesses into two consecutive 32 bit
+ * handler calls and merge the result afterwards.
+ */
+static bool call_range_handler(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ unsigned long offset,
+ const struct mmio_range *range)
+{
+ u32 *data32 = (void *)mmio->data;
+ struct kvm_exit_mmio mmio32;
+ bool ret;
+
+ if (likely(mmio->len <= 4))
+ return range->handle_mmio(vcpu, mmio, offset);
+
+ /*
+ * We assume that any access greater than 4 bytes is actually
+ * 8 bytes long, caused by a 64-bit access
+ */
+
+ mmio32.len = 4;
+ mmio32.is_write = mmio->is_write;
+
+ mmio32.phys_addr = mmio->phys_addr + 4;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[1];
+ ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
+ if (!mmio->is_write)
+ data32[1] = *(u32 *)mmio32.data;
+
+ mmio32.phys_addr = mmio->phys_addr;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[0];
+ ret |= range->handle_mmio(vcpu, &mmio32, offset);
+ if (!mmio->is_write)
+ data32[0] = *(u32 *)mmio32.data;
+
+ return ret;
+}
+
+/*
* vgic_handle_mmio_range - handle an in-kernel MMIO access
* @vcpu: pointer to the vcpu performing the access
* @run: pointer to the kvm_run structure
@@ -936,10 +978,10 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
spin_lock(&vcpu->kvm->arch.vgic.lock);
offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = range->handle_mmio(vcpu, mmio, offset);
+ updated_state = call_range_handler(vcpu, mmio, offset, range);
} else {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ if (!mmio->is_write)
+ memset(mmio->data, 0, mmio->len);
updated_state = false;
}
spin_unlock(&vcpu->kvm->arch.vgic.lock);
--
1.7.9.5
* [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
From: Chalamarla, Tirumalesh @ 2014-06-19 21:15 UTC
To: linux-arm-kernel
-----Original Message-----
From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
Sent: Thursday, June 19, 2014 2:46 AM
To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
Cc: christoffer.dall at linaro.org
Subject: [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
Some GICv3 registers can and will be accessed as 64 bit registers.
Currently the register handling code can only deal with 32 bit accesses, so we do two consecutive calls to cover this.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic.c | 48 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 4c6b212..b3cf4c7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -906,6 +906,48 @@ static bool vgic_validate_access(const struct vgic_dist *dist, }
/*
+ * Call the respective handler function for the given range.
+ * We split up any 64 bit accesses into two consecutive 32 bit
+ * handler calls and merge the result afterwards.
+ */
+static bool call_range_handler(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ unsigned long offset,
+ const struct mmio_range *range) {
+ u32 *data32 = (void *)mmio->data;
+ struct kvm_exit_mmio mmio32;
+ bool ret;
+
+ if (likely(mmio->len <= 4))
+ return range->handle_mmio(vcpu, mmio, offset);
+
+ /*
+ * We assume that any access greater than 4 bytes is actually
+ * 8 bytes long, caused by a 64-bit access
+ */
+
+ mmio32.len = 4;
+ mmio32.is_write = mmio->is_write;
+
+ mmio32.phys_addr = mmio->phys_addr + 4;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[1];
+ ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
+ if (!mmio->is_write)
+ data32[1] = *(u32 *)mmio32.data;
+
+ mmio32.phys_addr = mmio->phys_addr;
+ if (mmio->is_write)
+ *(u32 *)mmio32.data = data32[0];
+ ret |= range->handle_mmio(vcpu, &mmio32, offset);
+ if (!mmio->is_write)
+ data32[0] = *(u32 *)mmio32.data;
+
+ return ret;
+}
Any reason to use two 32-bit accesses instead of one 64-bit access? Maybe for AArch32 on ARMv8?
+
+/*
* vgic_handle_mmio_range - handle an in-kernel MMIO access
* @vcpu: pointer to the vcpu performing the access
* @run: pointer to the kvm_run structure
@@ -936,10 +978,10 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
spin_lock(&vcpu->kvm->arch.vgic.lock);
offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = range->handle_mmio(vcpu, mmio, offset);
+ updated_state = call_range_handler(vcpu, mmio, offset, range);
} else {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ if (!mmio->is_write)
+ memset(mmio->data, 0, mmio->len);
What is the use of this memset?
updated_state = false;
}
spin_unlock(&vcpu->kvm->arch.vgic.lock);
--
1.7.9.5
* [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
From: Andre Przywara @ 2014-06-20 8:34 UTC
To: linux-arm-kernel
On 19/06/14 22:15, Chalamarla, Tirumalesh wrote:
>
>
> -----Original Message-----
> From: kvmarm-bounces at lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
> Sent: Thursday, June 19, 2014 2:46 AM
> To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
> Cc: christoffer.dall at linaro.org
> Subject: [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
>
> Some GICv3 registers can and will be accessed as 64 bit registers.
> Currently the register handling code can only deal with 32 bit accesses, so we do two consecutive calls to cover this.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
> virt/kvm/arm/vgic.c | 48 +++++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 45 insertions(+), 3 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 4c6b212..b3cf4c7 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -906,6 +906,48 @@ static bool vgic_validate_access(const struct vgic_dist *dist, }
>
> /*
> + * Call the respective handler function for the given range.
> + * We split up any 64 bit accesses into two consecutive 32 bit
> + * handler calls and merge the result afterwards.
> + */
> +static bool call_range_handler(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + unsigned long offset,
> + const struct mmio_range *range) {
> + u32 *data32 = (void *)mmio->data;
> + struct kvm_exit_mmio mmio32;
> + bool ret;
> +
> + if (likely(mmio->len <= 4))
> + return range->handle_mmio(vcpu, mmio, offset);
> +
> + /*
> + * We assume that any access greater than 4 bytes is actually
> + * 8 bytes long, caused by a 64-bit access
> + */
> +
> + mmio32.len = 4;
> + mmio32.is_write = mmio->is_write;
> +
> + mmio32.phys_addr = mmio->phys_addr + 4;
> + if (mmio->is_write)
> + *(u32 *)mmio32.data = data32[1];
> + ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
> + if (!mmio->is_write)
> + data32[1] = *(u32 *)mmio32.data;
> +
> + mmio32.phys_addr = mmio->phys_addr;
> + if (mmio->is_write)
> + *(u32 *)mmio32.data = data32[0];
> + ret |= range->handle_mmio(vcpu, &mmio32, offset);
> + if (!mmio->is_write)
> + data32[0] = *(u32 *)mmio32.data;
> +
> + return ret;
> +}
>
> Any reason to use two 32-bit accesses instead of one 64-bit access? Maybe for AArch32 on ARMv8?
For the registers we care about right now we get along with this split.
And it seems to be less intrusive.
I have a patch to support native 64-bit accesses, but it needs some more
work. If there is high demand for it, I can post it (but Marc didn't
like the first version so much ;-) )
Regards,
Andre.
>
> +
> +/*
> * vgic_handle_mmio_range - handle an in-kernel MMIO access
> * @vcpu: pointer to the vcpu performing the access
> * @run: pointer to the kvm_run structure
> @@ -936,10 +978,10 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> spin_lock(&vcpu->kvm->arch.vgic.lock);
> offset -= range->base;
> if (vgic_validate_access(dist, range, offset)) {
> - updated_state = range->handle_mmio(vcpu, mmio, offset);
> + updated_state = call_range_handler(vcpu, mmio, offset, range);
> } else {
> - vgic_reg_access(mmio, NULL, offset,
> - ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
> + if (!mmio->is_write)
> + memset(mmio->data, 0, mmio->len);
>
> What is the use of this memset?
>
> updated_state = false;
> }
> spin_unlock(&vcpu->kvm->arch.vgic.lock);
> --
> 1.7.9.5
>
* [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
From: Marc Zyngier @ 2014-06-20 8:46 UTC
To: linux-arm-kernel
On 20/06/14 09:34, Andre Przywara wrote:
>
> On 19/06/14 22:15, Chalamarla, Tirumalesh wrote:
>>
>>
>> -----Original Message-----
>> From: kvmarm-bounces at lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
>> Sent: Thursday, June 19, 2014 2:46 AM
>> To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
>> Cc: christoffer.dall at linaro.org
>> Subject: [PATCH 04/14] arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
>>
>> Some GICv3 registers can and will be accessed as 64 bit registers.
>> Currently the register handling code can only deal with 32 bit accesses, so we do two consecutive calls to cover this.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>> virt/kvm/arm/vgic.c | 48 +++++++++++++++++++++++++++++++++++++++++++++---
>> 1 file changed, 45 insertions(+), 3 deletions(-)
>>
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 4c6b212..b3cf4c7 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -906,6 +906,48 @@ static bool vgic_validate_access(const struct vgic_dist *dist, }
>>
>> /*
>> + * Call the respective handler function for the given range.
>> + * We split up any 64 bit accesses into two consecutive 32 bit
>> + * handler calls and merge the result afterwards.
>> + */
>> +static bool call_range_handler(struct kvm_vcpu *vcpu,
>> + struct kvm_exit_mmio *mmio,
>> + unsigned long offset,
>> + const struct mmio_range *range) {
>> + u32 *data32 = (void *)mmio->data;
>> + struct kvm_exit_mmio mmio32;
>> + bool ret;
>> +
>> + if (likely(mmio->len <= 4))
>> + return range->handle_mmio(vcpu, mmio, offset);
>> +
>> + /*
>> + * We assume that any access greater than 4 bytes is actually
>> + * 8 bytes long, caused by a 64-bit access
>> + */
>> +
>> + mmio32.len = 4;
>> + mmio32.is_write = mmio->is_write;
>> +
>> + mmio32.phys_addr = mmio->phys_addr + 4;
>> + if (mmio->is_write)
>> + *(u32 *)mmio32.data = data32[1];
>> + ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
>> + if (!mmio->is_write)
>> + data32[1] = *(u32 *)mmio32.data;
>> +
>> + mmio32.phys_addr = mmio->phys_addr;
>> + if (mmio->is_write)
>> + *(u32 *)mmio32.data = data32[0];
>> + ret |= range->handle_mmio(vcpu, &mmio32, offset);
>> + if (!mmio->is_write)
>> + data32[0] = *(u32 *)mmio32.data;
>> +
>> + return ret;
>> +}
>>
>> Any reason to use two 32-bit accesses instead of one 64-bit access? Maybe for AArch32 on ARMv8?
>
> For the registers we care about right now we get along with this split.
> And it seems to be less intrusive.
> I have a patch to support native 64-bit accesses, but it needs some more
> work. If there is high demand for it, I can post it (but Marc didn't
> like the first version so much ;-) )
The main reason is that GICv3 neither mandates nor guarantees any form of
64-bit atomicity. There is strictly no need to provide a guarantee that
the architecture doesn't/cannot offer (think AArch32 guests, for example).
M.
--
Jazz is not dead. It just smells funny...
* [PATCH 05/14] arm/arm64: KVM: introduce per-VM ops
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
Currently we only have one virtual GIC model supported, so all guests
use the same emulation code. With the addition of another model we
end up with different guests using potentially different vGIC models,
so we have to split up some functions to be per VM.
Introduce a vgic_vm_ops struct to hold function pointers for those
functions that are different and provide the necessary code to
initialize them.
This includes functions that depend on the emulated GIC model only
and functions that depend on the combination of host and guest GIC.
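For illustration, this is the shape of the resulting dispatch at a
call site (the vgic_vm_op() helper macro is introduced in the vgic.c
hunk below):

	/* resolve a function pointer stored in this VM's vgic state */
	#define vgic_vm_op(kvm, fn) ((kvm)->arch.vgic.vm_ops.fn)

	/* call site: use this guest's handler, whatever its GIC model */
	handled = vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);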
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
include/kvm/arm_vgic.h | 18 +++++++-
virt/kvm/arm/vgic-v2.c | 17 +++++++-
virt/kvm/arm/vgic-v3.c | 16 ++++++-
virt/kvm/arm/vgic.c | 109 +++++++++++++++++++++++++++++++++---------------
4 files changed, 121 insertions(+), 39 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4feac9a..7e7c99e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -99,8 +99,6 @@ struct vgic_vmcr {
};
struct vgic_ops {
- struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
- void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
@@ -123,6 +121,17 @@ struct vgic_params {
unsigned int maint_irq;
/* Virtual control interface base address */
void __iomem *vctrl_base;
+ bool (*init_emul)(struct kvm *kvm, int type);
+};
+
+struct vgic_vm_ops {
+ struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
+ void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+ bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
+ struct kvm_exit_mmio *);
+ bool (*queue_sgi)(struct kvm_vcpu *vcpu, int irq);
+ void (*unqueue_sgi)(struct kvm_vcpu *vcpu, int irq, int source);
+ int (*vgic_init)(struct kvm *kvm, const struct vgic_params *params);
};
struct vgic_dist {
@@ -131,6 +140,9 @@ struct vgic_dist {
bool in_kernel;
bool ready;
+ /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+ u32 vgic_model;
+
int nr_cpus;
int nr_irqs;
@@ -168,6 +180,8 @@ struct vgic_dist {
/* Bitmap indicating which CPU has something pending */
unsigned long irq_pending_on_cpu;
+
+ struct vgic_vm_ops vm_ops;
#endif
};
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a55a9a4..f2c214a 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -148,8 +148,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
}
static const struct vgic_ops vgic_v2_ops = {
- .get_lr = vgic_v2_get_lr,
- .set_lr = vgic_v2_set_lr,
.sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
@@ -163,6 +161,20 @@ static const struct vgic_ops vgic_v2_ops = {
static struct vgic_params vgic_v2_params;
+static bool vgic_v2_init_emul(struct kvm *kvm, int type)
+{
+ struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ vm_ops->get_lr = vgic_v2_get_lr;
+ vm_ops->set_lr = vgic_v2_set_lr;
+ return true;
+ }
+
+ return false;
+}
+
/**
* vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -201,6 +213,7 @@ int vgic_v2_probe(struct device_node *vgic_node,
ret = -ENOMEM;
goto out;
}
+ vgic->init_emul = vgic_v2_init_emul;
vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index f01d446..f42961c 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -157,8 +157,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
}
static const struct vgic_ops vgic_v3_ops = {
- .get_lr = vgic_v3_get_lr,
- .set_lr = vgic_v3_set_lr,
.sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
.get_elrsr = vgic_v3_get_elrsr,
.get_eisr = vgic_v3_get_eisr,
@@ -170,6 +168,19 @@ static const struct vgic_ops vgic_v3_ops = {
.enable = vgic_v3_enable,
};
+static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
+{
+ struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ vm_ops->get_lr = vgic_v3_get_lr;
+ vm_ops->set_lr = vgic_v3_set_lr;
+ return true;
+ }
+ return false;
+}
+
static struct vgic_params vgic_v3_params;
/**
@@ -215,6 +226,7 @@ int vgic_v3_probe(struct device_node *vgic_node,
ret = -ENXIO;
goto out;
}
+ vgic->init_emul = vgic_v3_init_emul_compat;
vgic->vcpu_base = vcpu_res.start;
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index b3cf4c7..2de58b3 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -99,6 +99,8 @@ static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
+#define vgic_vm_op(kvm, fn) ((kvm)->arch.vgic.vm_ops.fn)
+
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
int nr_longs;
@@ -648,11 +650,16 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
-static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+
+static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
+}
+static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int vcpu_id = vcpu->vcpu_id;
int i;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
@@ -679,7 +686,8 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
*/
vgic_dist_irq_set(vcpu, lr.irq);
if (lr.irq < VGIC_NR_SGIS)
- *vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
+ vgic_vm_op(vcpu->kvm, unqueue_sgi)(vcpu, lr.irq,
+ lr.source);
lr.state &= ~LR_STATE_PENDING;
vgic_set_lr(vcpu, i, lr);
@@ -1030,7 +1038,7 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (!irqchip_in_kernel(vcpu->kvm))
return false;
- return vgic_v2_handle_mmio(vcpu, run, mmio);
+ return vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);
}
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
@@ -1138,13 +1146,13 @@ static void vgic_update_state(struct kvm *kvm)
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
- return vgic_ops->get_lr(vcpu, lr);
+ return vgic_vm_op(vcpu->kvm, get_lr)(vcpu, lr);
}
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr vlr)
{
- vgic_ops->set_lr(vcpu, lr, vlr);
+ return vgic_vm_op(vcpu->kvm, set_lr)(vcpu, lr, vlr);
}
static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
@@ -1282,7 +1290,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return true;
}
-static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
unsigned long sources;
@@ -1357,7 +1365,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
/* SGIs */
for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
- if (!vgic_queue_sgi(vcpu, i))
+ if (!vgic_vm_op(vcpu->kvm, queue_sgi)(vcpu, i))
overflow = 1;
}
@@ -1830,6 +1838,39 @@ void kvm_vgic_destroy(struct kvm *kvm)
vgic_free_maps(&kvm->arch.vgic);
}
+static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret, i;
+
+ dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ return -ENXIO;
+ }
+
+ ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ return ret;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ params->vcpu_base,
+ KVM_VGIC_V2_CPU_SIZE);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ return ret;
+ }
+
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ return 0;
+}
+
/**
* kvm_vgic_init - Initialize global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
@@ -1843,7 +1884,7 @@ int kvm_vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int ret = 0, i, v;
+ int ret = 0, v;
if (!irqchip_in_kernel(kvm))
return 0;
@@ -1853,8 +1894,6 @@ int kvm_vgic_init(struct kvm *kvm)
if (vgic_initialized(kvm))
goto out;
- dist->nr_cpus = atomic_read(&kvm->online_vcpus);
-
/*
* If nobody configured the number of interrupts, use the
* legacy one.
@@ -1862,28 +1901,9 @@ int kvm_vgic_init(struct kvm *kvm)
if (!dist->nr_irqs)
dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
- if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
- kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
- }
-
- ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ ret = vgic_vm_op(kvm, vgic_init)(kvm, vgic);
+ if (ret)
goto out;
- }
-
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
kvm_for_each_vcpu(v, vcpu, kvm) {
ret = kvm_vgic_vcpu_init(vcpu);
@@ -1904,6 +1924,21 @@ out:
return ret;
}
+static bool init_emulation_ops(struct kvm *kvm, int type)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
+ dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
+ dist->vm_ops.vgic_init = vgic_v2_init;
+ return true;
+ }
+ return false;
+}
+
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
int i, vcpu_lock_idx = -1, ret = 0;
@@ -1911,7 +1946,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
mutex_lock(&kvm->lock);
- if (kvm->arch.vgic.vctrl_base) {
+ if (kvm->arch.vgic.in_kernel) {
ret = -EEXIST;
goto out;
}
@@ -1934,12 +1969,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
}
}
+ if (!vgic->init_emul(kvm, type)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ init_emulation_ops(kvm, type);
+
out_unlock:
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
--
1.7.9.5
* [PATCH 06/14] arm/arm64: KVM: make the maximum number of vCPUs a per-VM value
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
Currently the maximum number of vCPUs supported is a global value
limited by the used GIC model. GICv3 will lift this limit, but we
still need to observe it for guests using GICv2.
So the maximum number of vCPUs becomes a per-VM value, depending on
the GIC model the guest uses.
Store and check the value in struct kvm_arch, but keep it down to
8 for now.
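A sketch of the resulting flow (mirroring the hunks below): the VM
starts with the compile-time default, and the vGIC model chosen later
may tighten it, so vCPU creation checks the VM's own ceiling.

	kvm->arch.max_vcpus = CONFIG_KVM_ARM_MAX_VCPUS;	/* VM init */
	...
	if (id >= kvm->arch.max_vcpus) {	/* vCPU creation */
		err = -EINVAL;
		goto out;
	}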
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/include/asm/kvm_host.h | 1 +
arch/arm/kvm/arm.c | 6 ++++++
arch/arm64/include/asm/kvm_host.h | 3 +++
virt/kvm/arm/vgic-v2.c | 5 +++++
virt/kvm/arm/vgic-v3.c | 6 ++++++
5 files changed, 21 insertions(+)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8d30f05..49c07ea 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -67,6 +67,7 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
+ int max_vcpus;
};
#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index fa37fa1..a291e63 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -142,6 +142,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
+ kvm->arch.max_vcpus = CONFIG_KVM_ARM_MAX_VCPUS;
return ret;
out_free_stage2_pgd:
@@ -260,6 +261,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
int err;
struct kvm_vcpu *vcpu;
+ if (id >= kvm->arch.max_vcpus) {
+ err = -EINVAL;
+ goto out;
+ }
+
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4c84250..eef63b1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -58,6 +58,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The maximum number of vCPUs depends on the used GIC model */
+ int max_vcpus;
+
/* Interrupt controller */
struct vgic_dist vgic;
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index f2c214a..4091078 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -164,11 +164,16 @@ static struct vgic_params vgic_v2_params;
static bool vgic_v2_init_emul(struct kvm *kvm, int type)
{
struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+ int nr_vcpus;
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+ if (nr_vcpus > 8)
+ return false;
vm_ops->get_lr = vgic_v2_get_lr;
vm_ops->set_lr = vgic_v2_set_lr;
+ kvm->arch.max_vcpus = 8;
return true;
}
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index f42961c..40d6817 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -171,11 +171,17 @@ static const struct vgic_ops vgic_v3_ops = {
static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
{
struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+ int nr_vcpus;
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+ if (nr_vcpus > 8)
+ return false;
+
vm_ops->get_lr = vgic_v3_get_lr;
vm_ops->set_lr = vgic_v3_set_lr;
+ kvm->arch.max_vcpus = 8;
return true;
}
return false;
--
1.7.9.5
* [PATCH 07/14] arm/arm64: KVM: make the value of ICC_SRE_EL1 a per-VM variable
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
ICC_SRE_EL1 is a system register allowing msr/mrs accesses to the
GIC CPU interface for EL1 (guests). Currently we force it to 0, but
for proper GICv3 support we have to allow guests to use it (depending
on their selected virtual GIC model).
So add ICC_SRE_EL1 to the list of saved/restored registers on a
world switch, but actually disallow a guest to change it by only
restoring a fixed, once-initialized value.
This value depends on the GIC model userland has chosen for a guest.
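A hedged sketch of the intended per-model initialization (an assumption
about later patches in this series, not code in this one; ICC_SRE_EL1_SRE
names the SRE bit, bit 0):

	/* vgic_v3_enable(), conceptually: */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		vgic_v3->vgic_sre = 0;			/* this patch: fixed off */
	else
		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;	/* GICv3 guests, later */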
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kvm/vgic-v3-switch.S | 14 +++++++++-----
include/kvm/arm_vgic.h | 1 +
virt/kvm/arm/vgic-v3.c | 9 +++++++--
4 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index e74654c..0f24b21 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -139,6 +139,7 @@ int main(void)
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
index 4ede9d8..c0cfd16 100644
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -148,17 +148,18 @@
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_v3_state
- // Disable SRE_EL1 access. Necessary, otherwise
- // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
- msr ICC_SRE_EL1, xzr
- isb
-
// Compute the address of struct vgic_cpu
add x3, x0, #VCPU_VGIC_CPU
// Restore all interesting registers
ldr w4, [x3, #VGIC_V3_CPU_HCR]
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+ ldr w25, [x3, #VGIC_V3_CPU_SRE]
+
+ msr ICC_SRE_EL1, x25
+
+ // make sure SRE is valid before writing the other registers
+ isb
msr ICH_HCR_EL2, x4
msr ICH_VMCR_EL2, x5
@@ -243,9 +244,12 @@
dsb sy
// Prevent the guest from touching the GIC system registers
+ // if SRE isn't enabled for GICv3 emulation
+ cbnz x25, 1f
mrs x5, ICC_SRE_EL2
and x5, x5, #~ICC_SRE_EL2_ENABLE
msr ICC_SRE_EL2, x5
+1:
.endm
ENTRY(__save_vgic_v3_state)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7e7c99e..8aa8482 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -199,6 +199,7 @@ struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
+ u32 vgic_sre; /* Restored only, change ignored */
u32 vgic_misr; /* Saved only */
u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 40d6817..7d9c85e 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -145,15 +145,20 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
+ struct vgic_v3_cpu_if *vgic_v3;
+
+ vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
* anyway.
*/
- vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+ vgic_v3->vgic_vmcr = 0;
+
+ vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+ vgic_v3->vgic_hcr = ICH_HCR_EN;
}
static const struct vgic_ops vgic_v3_ops = {
--
1.7.9.5
* [PATCH 08/14] arm/arm64: KVM: refactor MMIO accessors
From: Andre Przywara @ 2014-06-19 9:45 UTC
To: linux-arm-kernel
The MMIO accessors for GICD_I[CS]ENABLER, GICD_I[CS]PENDR and
GICD_ICFGR behave very similarly in GICv3, although the way the
affected vCPU is determined differs.
Factor out a generic, backend-facing implementation and use small
wrappers in the current GICv2 emulation to ease code sharing later.
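To see why the backend-facing signature helps, here is a hedged sketch
of a GICv3-style wrapper as later patches could add it
(redist_target_vcpu() is hypothetical; the point is only that the
target vCPU comes from the redistributor region rather than from the
requesting vCPU):

	static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
						      struct kvm_exit_mmio *mmio,
						      phys_addr_t offset)
	{
		int vcpu_id = redist_target_vcpu(vcpu->kvm, mmio); /* hypothetical */

		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu_id, ACCESS_WRITE_SETBIT);
	}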
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic.c | 93 ++++++++++++++++++++++++++++-----------------------
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2de58b3..2a59dff 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -398,35 +398,54 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+ struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
+
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
+ if (access & ACCESS_WRITE_CLEARBIT) {
+ if (offset < 4) /* Force SGI enabled */
+ *reg |= 0xffff;
+ vgic_retire_disabled_irqs(target_vcpu);
+ }
+ vgic_update_state(kvm);
return true;
}
return false;
}
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
+{
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_state, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(vcpu);
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
@@ -437,31 +456,16 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
return false;
}
@@ -587,14 +591,10 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
if (offset & 4)
val = *reg >> 16;
@@ -623,6 +623,17 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
return false;
}
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
--
1.7.9.5
* [PATCH 08/14] arm/arm64: KVM: refactor MMIO accessors
From: Chalamarla, Tirumalesh @ 2014-06-19 21:24 UTC
To: linux-arm-kernel
-----Original Message-----
From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
Sent: Thursday, June 19, 2014 2:46 AM
To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
Cc: christoffer.dall at linaro.org
Subject: [PATCH 08/14] arm/arm64: KVM: refactor MMIO accessors
The MMIO accessors for GICD_I[CS]ENABLER, GICD_I[CS]PENDR and GICD_ICFGR behave very similarly in GICv3, although the way the affected vCPU is determined differs.
Factor out a generic, backend-facing implementation and use small wrappers in the current GICv2 emulation to ease code sharing later.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic.c | 93 ++++++++++++++++++++++++++++-----------------------
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 2de58b3..2a59dff 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -398,35 +398,54 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+ struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
+
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
+ if (access & ACCESS_WRITE_CLEARBIT) {
+ if (offset < 4) /* Force SGI enabled */
+ *reg |= 0xffff;
+ vgic_retire_disabled_irqs(target_vcpu);
+ }
+ vgic_update_state(kvm);
return true;
}
return false;
}
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT); }
+
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); }
+
+static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access) {
+ u32 *reg;
+ int mode = ACCESS_READ_VALUE | access;
+
+ reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_state, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset, mode);
if (mmio->is_write) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(vcpu);
- vgic_update_state(vcpu->kvm);
+ vgic_update_state(kvm);
return true;
}
@@ -437,31 +456,16 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
- if (mmio->is_write) {
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
return false;
}
@@ -587,14 +591,10 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
if (offset & 4)
val = *reg >> 16;
@@ -623,6 +623,17 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
return false;
}
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset) {
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset); }
+
I think the coding style needs to change for this function: the opening brace should be on its own line.
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset) {
--
1.7.9.5
_______________________________________________
kvmarm mailing list
kvmarm at lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 25+ messages in thread
* [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (7 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 08/14] arm/arm64: KVM: refactor MMIO accessors Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
2014-06-19 21:27 ` Chalamarla, Tirumalesh
2014-06-19 9:45 ` [PATCH 10/14] arm/arm64: KVM: add opaque private pointer to MMIO accessors Andre Przywara
` (5 subsequent siblings)
14 siblings, 1 reply; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
vgic.c is currently a mixture of generic vGIC emulation code and
functions specific to emulating a GICv2. To ease the addition of
GICv3, split off the strictly v2-specific parts into a new file
vgic-v2-emul.c.
A new header file vgic.h is introduced to allow separation and later
sharing of functions.
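To sketch the mechanism the split relies on (illustrative stand-ins, not the kernel's real definitions): the generic code dispatches through a per-VM ops table, and the model-specific file only has to fill it in, much as vgic_v2_init_emulation_ops() does at the end of the new file below.
#include <stdbool.h>
#include <stdio.h>
enum vgic_model { MODEL_GICV2, MODEL_GICV3 };
/* Per-VM ops table the generic code dispatches through. */
struct vm_ops {
	bool (*handle_mmio)(void);
	bool (*queue_sgi)(int irq);
};
/* These would live in the model-specific file (vgic-v2-emul.c here). */
static bool v2_handle_mmio(void)
{
	printf("GICv2 distributor access\n");
	return true;
}
static bool v2_queue_sgi(int irq)
{
	printf("GICv2 SGI %d queued\n", irq);
	return true;
}
/* The one hook the model-specific file needs to export. */
static bool vgic_v2_init_ops(struct vm_ops *ops, enum vgic_model model)
{
	if (model != MODEL_GICV2)
		return false;
	ops->handle_mmio = v2_handle_mmio;
	ops->queue_sgi = v2_queue_sgi;
	return true;
}
int main(void)
{
	struct vm_ops ops;
	/* Generic code stays model-agnostic after this point. */
	if (vgic_v2_init_ops(&ops, MODEL_GICV2))
		ops.queue_sgi(3);
	return 0;
}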
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/kvm/Makefile | 1 +
arch/arm64/kvm/Makefile | 1 +
virt/kvm/arm/vgic-v2-emul.c | 795 ++++++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 856 +++----------------------------------------
virt/kvm/arm/vgic.h | 113 ++++++
5 files changed, 956 insertions(+), 810 deletions(-)
create mode 100644 virt/kvm/arm/vgic-v2-emul.c
create mode 100644 virt/kvm/arm/vgic.h
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f7057ed..443b8be 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -22,4 +22,5 @@ obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 32a0961..f241db6 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,6 +20,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
new file mode 100644
index 0000000..ba5f873
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -0,0 +1,795 @@
+/*
+ * Contains GICv2 specific emulation code, was in vgic.c before.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define GICC_ARCH_VERSION_V2 0x2
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+ return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case 0: /* GICD_CTLR */
+ reg = vcpu->kvm->arch.vgic.enabled;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = reg & 1;
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+
+ case 4: /* GICD_TYPER */
+ reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+ reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+
+ case 8: /* GICD_IIDR */
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+#define GICD_ITARGETSR_SIZE 32
+#define GICD_CPUTARGETS_BITS 8
+#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+ u32 val = 0;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+ val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
+
+ return val;
+}
+
+static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, c;
+ unsigned long *bmap;
+ u32 target;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ /*
+ * Pick the LSB in each byte. This ensures we target exactly
+ * one vcpu per IRQ. If the byte is null, assume we target
+ * CPU0.
+ */
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
+ int shift = i * GICD_CPUTARGETS_BITS;
+ target = ffs((val >> shift) & 0xffU);
+ target = target ? (target - 1) : 0;
+ dist->irq_spi_cpu[irq + i] = target;
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+ if (c == target)
+ set_bit(irq + i, bmap);
+ else
+ clear_bit(irq + i, bmap);
+ }
+ }
+}
+
+static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ /* We treat the banked interrupts targets as read-only */
+ if (offset < 32) {
+ u32 roreg = 1 << vcpu->vcpu_id;
+ roreg |= roreg << 8;
+ roreg |= roreg << 16;
+
+ vgic_reg_access(mmio, &roreg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_dispatch_sgi(vcpu, reg);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3) * 4;
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg = 0;
+
+ /* Copy source SGIs from distributor side */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
+ reg |= ((u32)sources) << (8 * (sgi - min_sgi));
+ }
+
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, bool set)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3) * 4;
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg;
+ bool updated = false;
+
+ reg = mmio_data_read(mmio, ~0);
+
+ /* Set or clear pending SGIs on the distributor */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 mask = reg >> (8 * (sgi - min_sgi));
+ u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
+ if (set) {
+ if ((*src & mask) != mask)
+ updated = true;
+ *src |= mask;
+ } else {
+ if (*src & mask)
+ updated = true;
+ *src &= ~mask;
+ }
+ }
+
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+
+ return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
+/*
+ * I would have liked to use the kvm_bus_io_*() API instead, but it
+ * cannot cope with banked registers (only the VM pointer is passed
+ * around, and we need the vcpu). One of these days, someone please
+ * fix it!
+ */
+static const struct mmio_range vgic_dist_ranges[] = {
+ {
+ .base = GIC_DIST_CTRL,
+ .len = 12,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ .base = GIC_DIST_IGROUP,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ENABLE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg,
+ },
+ {
+ .base = GIC_DIST_ENABLE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_PRI,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg,
+ },
+ {
+ .base = GIC_DIST_TARGET,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_target_reg,
+ },
+ {
+ .base = GIC_DIST_CONFIG,
+ .len = VGIC_MAX_IRQS / 4,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg,
+ },
+ {
+ .base = GIC_DIST_SOFTINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_sgi_reg,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_CLEAR,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_clear,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_SET,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_set,
+ },
+ {}
+};
+
+static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
+
+ if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
+ KVM_VGIC_V2_DIST_SIZE))
+ return false;
+
+ /* GICv2 does not support accesses wider than 32 bits */
+ if (mmio->len > 4) {
+ kvm_inject_dabt(vcpu, mmio->phys_addr);
+ return true;
+ }
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+}
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nrcpus = atomic_read(&kvm->online_vcpus);
+ u8 target_cpus;
+ int sgi, mode, c, vcpu_id;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = reg & 0xf;
+ target_cpus = (reg >> 16) & 0xff;
+ mode = (reg >> 24) & 3;
+
+ switch (mode) {
+ case 0:
+ if (!target_cpus)
+ return;
+ break;
+
+ case 1:
+ target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+ break;
+
+ case 2:
+ target_cpus = 1 << vcpu_id;
+ break;
+ }
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (target_cpus & 1) {
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set(vcpu, sgi);
+ *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n",
+ sgi, vcpu_id, c);
+ }
+
+ target_cpus >>= 1;
+ }
+}
+
+static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long sources;
+ int vcpu_id = vcpu->vcpu_id;
+ int c;
+
+ sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
+
+ for_each_set_bit(c, &sources, dist->nr_cpus) {
+ if (vgic_queue_irq(vcpu, c, irq))
+ clear_bit(c, &sources);
+ }
+
+ *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
+
+ /*
+ * If the sources bitmap has been cleared it means that we
+ * could queue all the SGIs onto link registers (see the
+ * clear_bit above), and therefore we are done with them in
+ * our emulated gic and can get rid of them.
+ */
+ if (!sources) {
+ vgic_dist_irq_clear(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret, i;
+
+ dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ return -ENXIO;
+ }
+
+ ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ return ret;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ params->vcpu_base,
+ KVM_VGIC_V2_CPU_SIZE);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ return ret;
+ }
+
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ return 0;
+}
+
+static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
+}
+
+bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
+ dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
+ dist->vm_ops.vgic_init = vgic_v2_init;
+ return true;
+ }
+ return false;
+}
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ bool updated = false;
+ struct vgic_vmcr vmcr;
+ u32 *vmcr_field;
+ u32 reg;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (offset & ~0x3) {
+ case GIC_CPU_CTRL:
+ vmcr_field = &vmcr.ctlr;
+ break;
+ case GIC_CPU_PRIMASK:
+ vmcr_field = &vmcr.pmr;
+ break;
+ case GIC_CPU_BINPOINT:
+ vmcr_field = &vmcr.bpr;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ vmcr_field = &vmcr.abpr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (!mmio->is_write) {
+ reg = *vmcr_field;
+ mmio_data_write(mmio, ~0, reg);
+ } else {
+ reg = mmio_data_read(mmio, ~0);
+ if (reg != *vmcr_field) {
+ *vmcr_field = reg;
+ vgic_set_vmcr(vcpu, &vmcr);
+ updated = true;
+ }
+ }
+ return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ if (mmio->is_write)
+ return false;
+
+ /* GICC_IIDR */
+ reg = (PRODUCT_ID_KVM << 20) |
+ (GICC_ARCH_VERSION_V2 << 16) |
+ (IMPLEMENTER_ARM << 0);
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+/*
+ * CPU Interface Register accesses - these are not accessed by the VM, but by
+ * user space for saving and restoring VGIC state.
+ */
+static const struct mmio_range vgic_cpu_ranges[] = {
+ {
+ .base = GIC_CPU_CTRL,
+ .len = 12,
+ .handle_mmio = handle_cpu_mmio_misc,
+ },
+ {
+ .base = GIC_CPU_ALIAS_BINPOINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_abpr,
+ },
+ {
+ .base = GIC_CPU_ACTIVEPRIO,
+ .len = 16,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_CPU_IDENT,
+ .len = 4,
+ .handle_mmio = handle_cpu_mmio_ident,
+ },
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ const struct mmio_range *r = NULL, *ranges;
+ phys_addr_t offset;
+ int ret, cpuid, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ struct vgic_dist *vgic;
+ struct kvm_exit_mmio mmio;
+
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+ mutex_lock(&dev->kvm->lock);
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ vgic = &dev->kvm->arch.vgic;
+
+ mmio.len = 4;
+ mmio.is_write = is_write;
+ if (is_write)
+ mmio_data_write(&mmio, ~0, *reg);
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ mmio.phys_addr = vgic->vgic_dist_base + offset;
+ ranges = vgic_dist_ranges;
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ mmio.phys_addr = vgic->vgic_cpu_base + offset;
+ ranges = vgic_cpu_ranges;
+ break;
+ default:
+ BUG();
+ }
+ r = vgic_find_matching_range(ranges, &mmio, offset);
+
+ if (unlikely(!r || !r->handle_mmio)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+
+ spin_lock(&vgic->lock);
+
+ /*
+ * Ensure that no other VCPU is running by checking the vcpu->cpu
+ * field. If no other VCPUs are running we can safely access the VGIC
+ * state, because even if another VCPU is run after this point, that
+ * VCPU will not touch the vgic state, because it will block on
+ * getting the vgic->lock in kvm_vgic_sync_hwstate().
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (unlikely(tmp_vcpu->cpu != -1)) {
+ ret = -EBUSY;
+ goto out_vgic_unlock;
+ }
+ }
+
+ /*
+ * Move all pending IRQs from the LRs on all VCPUs so the pending
+ * state can be properly represented in the register state accessible
+ * through this API.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+ vgic_unqueue_irqs(tmp_vcpu);
+
+ offset -= r->base;
+ r->handle_mmio(vcpu, &mmio, offset);
+
+ if (!is_write)
+ *reg = mmio_data_read(&mmio, ~0);
+
+ ret = 0;
+out_vgic_unlock:
+ spin_unlock(&vgic->lock);
+out:
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ phys_addr_t offset;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_dist_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
+ return 0;
+ }
+ return -ENXIO;
+}
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (!ret)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+ return vgic_attr_regs_access(dev, attr, &reg, true);
+ }
+ return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg = 0;
+ int r;
+
+ r = vgic_get_common_attr(dev, attr);
+ if (!r)
+ return r;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ r = vgic_attr_regs_access(dev, attr, &reg, false);
+ if (r)
+ return r;
+ r = put_user(reg, uaddr);
+ break;
+ }
+
+ return r;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+ .name = "kvm-arm-vgic-v2",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v2_set_attr,
+ .get_attr = vgic_v2_get_attr,
+ .has_attr = vgic_v2_has_attr,
+};
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2a59dff..0140505 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -69,32 +69,14 @@
* interrupt line to be sampled again.
*/
-#define VGIC_ADDR_UNDEF (-1)
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+#include "vgic.h"
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
-#define IMPLEMENTER_ARM 0x43b
#define GICC_ARCH_VERSION_V2 0x2
-#define ACCESS_READ_VALUE (1 << 0)
-#define ACCESS_READ_RAZ (0 << 0)
-#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED (0 << 1)
-#define ACCESS_WRITE_SETBIT (1 << 1)
-#define ACCESS_WRITE_CLEARBIT (2 << 1)
-#define ACCESS_WRITE_VALUE (3 << 1)
-#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
-
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
-static void vgic_update_state(struct kvm *kvm);
-static void vgic_kick_vcpus(struct kvm *kvm);
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
@@ -122,8 +104,7 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
kfree(b->private);
}
-static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
- int cpuid, u32 offset)
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
offset >>= 2;
if (!offset)
@@ -141,8 +122,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}
-static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val)
{
unsigned long *reg;
@@ -164,7 +145,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
return x->private + cpuid;
}
-static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
return x->shared;
}
@@ -190,7 +171,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b)
kfree(b->private);
}
-static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
u32 *reg;
@@ -252,14 +233,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}
-static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}
-static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -275,7 +256,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
if (irq < VGIC_NR_PRIVATE_IRQS)
clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
@@ -284,16 +265,6 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
- return *((u32 *)mmio->data) & mask;
-}
-
-static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
- *((u32 *)mmio->data) = value & mask;
-}
-
/**
* vgic_reg_access - access vgic register
* @mmio: pointer to the data describing the mmio access
@@ -305,7 +276,7 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
* modes defined for vgic register access
* (read,raz,write-ignored,setbit,clearbit,write)
*/
-static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
phys_addr_t offset, int mode)
{
int word_offset = (offset & 3) * 8;
@@ -355,42 +326,7 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
}
}
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- u32 word_offset = offset & 3;
-
- switch (offset & ~3) {
- case 0: /* GICD_CTLR */
- reg = vcpu->kvm->arch.vgic.enabled;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = reg & 1;
- vgic_update_state(vcpu->kvm);
- return true;
- }
- break;
-
- case 4: /* GICD_TYPER */
- reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
- reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
-
- case 8: /* GICD_IIDR */
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
- }
-
- return false;
-}
-
-static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
vgic_reg_access(mmio, NULL, offset,
@@ -398,8 +334,8 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
return false;
}
-static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
u32 *reg;
int mode = ACCESS_READ_VALUE | access;
@@ -420,24 +356,8 @@ static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
-}
-
-static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
+bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
u32 *reg;
int mode = ACCESS_READ_VALUE | access;
@@ -452,110 +372,6 @@ static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
- return false;
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-#define GICD_ITARGETSR_SIZE 32
-#define GICD_CPUTARGETS_BITS 8
-#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int i;
- u32 val = 0;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
-
- return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
- u32 target;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- /*
- * Pick the LSB in each byte. This ensures we target exactly
- * one vcpu per IRQ. If the byte is null, assume we target
- * CPU0.
- */
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
- int shift = i * GICD_CPUTARGETS_BITS;
- target = ffs((val >> shift) & 0xffU);
- target = target ? (target - 1) : 0;
- dist->irq_spi_cpu[irq + i] = target;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- if (c == target)
- set_bit(irq + i, bmap);
- else
- clear_bit(irq + i, bmap);
- }
- }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- /* We treat the banked interrupts targets as read-only */
- if (offset < 32) {
- u32 roreg = 1 << vcpu->vcpu_id;
- roreg |= roreg << 8;
- roreg |= roreg << 16;
-
- vgic_reg_access(mmio, &roreg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
static u32 vgic_cfg_expand(u16 val)
{
u32 res = 0;
@@ -591,8 +407,8 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
@@ -623,32 +439,6 @@ static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
-
- return vgic_handle_cfg_reg(reg, mmio, offset);
-}
-
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_dispatch_sgi(vcpu, reg);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -661,14 +451,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
-
-static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
-}
-static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int i;
@@ -715,196 +498,18 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}
}
-/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
-static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3) * 4;
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-
- /* Copy source SGIs from distributor side */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- int shift = 8 * (sgi - min_sgi);
- reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
- }
-
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, bool set)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3) * 4;
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
- bool updated = false;
-
- reg = mmio_data_read(mmio, ~0);
-
- /* Clear pending SGIs on the distributor */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 mask = reg >> (8 * (sgi - min_sgi));
- u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
- if (set) {
- if ((*src & mask) != mask)
- updated = true;
- *src |= mask;
- } else {
- if (*src & mask)
- updated = true;
- *src &= ~mask;
- }
- }
-
- if (updated)
- vgic_update_state(vcpu->kvm);
-
- return updated;
-}
-
-static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
-}
-
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
-}
-
-/*
- * I would have liked to use the kvm_bus_io_*() API instead, but it
- * cannot cope with banked registers (only the VM pointer is passed
- * around, and we need the vcpu). One of these days, someone please
- * fix it!
- */
-struct mmio_range {
- phys_addr_t base;
- unsigned long len;
- int bits_per_irq;
- bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-};
-
-static const struct mmio_range vgic_dist_ranges[] = {
- {
- .base = GIC_DIST_CTRL,
- .len = 12,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_misc,
- },
- {
- .base = GIC_DIST_IGROUP,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ENABLE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg,
- },
- {
- .base = GIC_DIST_ENABLE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg,
- },
- {
- .base = GIC_DIST_PENDING_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg,
- },
- {
- .base = GIC_DIST_PENDING_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ACTIVE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_PRI,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg,
- },
- {
- .base = GIC_DIST_TARGET,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_target_reg,
- },
- {
- .base = GIC_DIST_CONFIG,
- .len = VGIC_MAX_IRQS / 4,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg,
- },
- {
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
- .base = GIC_DIST_SGI_PENDING_CLEAR,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_clear,
- },
- {
- .base = GIC_DIST_SGI_PENDING_SET,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_set,
- },
- {}
-};
-
-static const
-struct mmio_range *find_matching_range(const struct mmio_range *ranges,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+const
+struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
const struct mmio_range *r = ranges;
-
while (r->len) {
if (offset >= r->base &&
(offset + mmio->len) <= (r->base + r->len))
return r;
r++;
}
-
return NULL;
}
@@ -976,7 +581,7 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
*
* returns true if the MMIO access could be performed
*/
-static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio,
const struct mmio_range *ranges,
unsigned long mmio_base)
@@ -987,7 +592,7 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long offset;
offset = mmio->phys_addr - mmio_base;
- range = find_matching_range(ranges, mmio, offset);
+ range = vgic_find_matching_range(ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -1013,27 +618,6 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
-#define IS_IN_RANGE(addr, alen, base, len) \
- (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
-
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
- if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
- KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* GICv2 does not support accesses wider than 32 bits */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
/**
* vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
* @vcpu: pointer to the vcpu performing the access
@@ -1052,52 +636,6 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);
}
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
-{
- return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
-}
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
-{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- u8 target_cpus;
- int sgi, mode, c, vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
-
- sgi = reg & 0xf;
- target_cpus = (reg >> 16) & 0xff;
- mode = (reg >> 24) & 3;
-
- switch (mode) {
- case 0:
- if (!target_cpus)
- return;
- break;
-
- case 1:
- target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
- break;
-
- case 2:
- target_cpus = 1 << vcpu_id;
- break;
- }
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (target_cpus & 1) {
- /* Flag the SGI as pending */
- vgic_dist_irq_set(vcpu, sgi);
- *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
- kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
- }
-
- target_cpus >>= 1;
- }
-}
-
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
@@ -1136,7 +674,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
* Update the interrupt state and determine which CPUs have pending
* interrupts. Must be called with distributor lock held.
*/
-static void vgic_update_state(struct kvm *kvm)
+void vgic_update_state(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1197,12 +735,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
vgic_ops->disable_underflow(vcpu);
}
-static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->get_vmcr(vcpu, vmcr);
}
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->set_vmcr(vcpu, vmcr);
}
@@ -1251,8 +789,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
/*
* Queue an interrupt to a CPU virtual interface. Return true on success,
* or false if it wasn't possible to queue it.
+ * sgi_source must be zero for any non-SGI interrupts.
*/
-static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -1301,37 +840,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return true;
}
-static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long sources;
- int vcpu_id = vcpu->vcpu_id;
- int c;
-
- sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
-
- for_each_set_bit(c, &sources, dist->nr_cpus) {
- if (vgic_queue_irq(vcpu, c, irq))
- clear_bit(c, &sources);
- }
-
- *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
-
- /*
- * If the sources bitmap has been cleared it means that we
- * could queue all the SGIs onto link registers (see the
- * clear_bit above), and therefore we are done with them in
- * our emulated gic and can get rid of them.
- */
- if (!sources) {
- vgic_dist_irq_clear(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
if (vgic_irq_is_active(vcpu, irq))
@@ -1523,7 +1031,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
-static void vgic_kick_vcpus(struct kvm *kvm)
+void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int c;
@@ -1807,7 +1315,7 @@ static void vgic_free_maps(struct vgic_dist *dist)
kfree(dist->irq_spi_target);
}
-static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
+int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
{
int ret, i;
@@ -1822,7 +1330,8 @@ static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
GFP_KERNEL);
dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS,
GFP_KERNEL);
- dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
+ dist->irq_spi_target = kcalloc(nr_cpus,
+ sizeof(*dist->irq_spi_target),
GFP_KERNEL);
if (!dist->irq_sgi_sources ||
!dist->irq_spi_cpu ||
@@ -1849,39 +1358,6 @@ void kvm_vgic_destroy(struct kvm *kvm)
vgic_free_maps(&kvm->arch.vgic);
}
-static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int ret, i;
-
- dist->nr_cpus = atomic_read(&kvm->online_vcpus);
-
- if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
- kvm_err("Need to set vgic distributor addresses first\n");
- return -ENXIO;
- }
-
- ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- return ret;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- params->vcpu_base,
- KVM_VGIC_V2_CPU_SIZE);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- return ret;
- }
-
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
-
- return 0;
-}
-
/**
* kvm_vgic_init - Initialize global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
@@ -1937,15 +1413,9 @@ out:
static bool init_emulation_ops(struct kvm *kvm, int type)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
- dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
- dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
- dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
- dist->vm_ops.vgic_init = vgic_v2_init;
- return true;
+ return vgic_v2_init_emulation_ops(kvm, type);
}
return false;
}
@@ -2086,185 +1556,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
return r;
}
-static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+int vgic_has_attr_regs(const struct mmio_range *ranges,
+ phys_addr_t offset)
{
- bool updated = false;
- struct vgic_vmcr vmcr;
- u32 *vmcr_field;
- u32 reg;
-
- vgic_get_vmcr(vcpu, &vmcr);
-
- switch (offset & ~0x3) {
- case GIC_CPU_CTRL:
- vmcr_field = &vmcr.ctlr;
- break;
- case GIC_CPU_PRIMASK:
- vmcr_field = &vmcr.pmr;
- break;
- case GIC_CPU_BINPOINT:
- vmcr_field = &vmcr.bpr;
- break;
- case GIC_CPU_ALIAS_BINPOINT:
- vmcr_field = &vmcr.abpr;
- break;
- default:
- BUG();
- }
-
- if (!mmio->is_write) {
- reg = *vmcr_field;
- mmio_data_write(mmio, ~0, reg);
- } else {
- reg = mmio_data_read(mmio, ~0);
- if (reg != *vmcr_field) {
- *vmcr_field = reg;
- vgic_set_vmcr(vcpu, &vmcr);
- updated = true;
- }
- }
- return updated;
-}
-
-static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
-}
-
-static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- if (mmio->is_write)
- return false;
-
- /* GICC_IIDR */
- reg = (PRODUCT_ID_KVM << 20) |
- (GICC_ARCH_VERSION_V2 << 16) |
- (IMPLEMENTER_ARM << 0);
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-/*
- * CPU Interface Register accesses - these are not accessed by the VM, but by
- * user space for saving and restoring VGIC state.
- */
-static const struct mmio_range vgic_cpu_ranges[] = {
- {
- .base = GIC_CPU_CTRL,
- .len = 12,
- .handle_mmio = handle_cpu_mmio_misc,
- },
- {
- .base = GIC_CPU_ALIAS_BINPOINT,
- .len = 4,
- .handle_mmio = handle_mmio_abpr,
- },
- {
- .base = GIC_CPU_ACTIVEPRIO,
- .len = 16,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_CPU_IDENT,
- .len = 4,
- .handle_mmio = handle_cpu_mmio_ident,
- },
-};
-
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
- const struct mmio_range *r = NULL, *ranges;
- phys_addr_t offset;
- int ret, cpuid, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- struct vgic_dist *vgic;
- struct kvm_exit_mmio mmio;
-
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-
- mutex_lock(&dev->kvm->lock);
-
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
- goto out;
- }
-
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- vgic = &dev->kvm->arch.vgic;
-
- mmio.len = 4;
- mmio.is_write = is_write;
- if (is_write)
- mmio_data_write(&mmio, ~0, *reg);
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- mmio.phys_addr = vgic->vgic_dist_base + offset;
- ranges = vgic_dist_ranges;
- break;
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- mmio.phys_addr = vgic->vgic_cpu_base + offset;
- ranges = vgic_cpu_ranges;
- break;
- default:
- BUG();
- }
- r = find_matching_range(ranges, &mmio, offset);
-
- if (unlikely(!r || !r->handle_mmio)) {
- ret = -ENXIO;
- goto out;
- }
-
-
- spin_lock(&vgic->lock);
-
- /*
- * Ensure that no other VCPU is running by checking the vcpu->cpu
- * field. If no other VPCUs are running we can safely access the VGIC
- * state, because even if another VPU is run after this point, that
- * VCPU will not touch the vgic state, because it will block on
- * getting the vgic->lock in kvm_vgic_sync_hwstate().
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (unlikely(tmp_vcpu->cpu != -1)) {
- ret = -EBUSY;
- goto out_vgic_unlock;
- }
- }
-
- /*
- * Move all pending IRQs from the LRs on all VCPUs so the pending
- * state can be properly represented in the register state accessible
- * through this API.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
- vgic_unqueue_irqs(tmp_vcpu);
-
- offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
-
- if (!is_write)
- *reg = mmio_data_read(&mmio, ~0);
+ struct kvm_exit_mmio dev_attr_mmio;
- ret = 0;
-out_vgic_unlock:
- spin_unlock(&vgic->lock);
-out:
- mutex_unlock(&dev->kvm->lock);
- return ret;
+ dev_attr_mmio.len = 4;
+ if (vgic_find_matching_range(ranges, &dev_attr_mmio, offset))
+ return 0;
+ else
+ return -ENXIO;
}
-static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
@@ -2281,16 +1585,6 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return (r == -ENODEV) ? -ENXIO : r;
}
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg;
-
- if (get_user(reg, uaddr))
- return -EFAULT;
-
- return vgic_attr_regs_access(dev, attr, &reg, true);
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 val;
@@ -2319,7 +1613,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -ENXIO;
}
-static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = -ENXIO;
@@ -2337,18 +1631,6 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -EFAULT;
break;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg = 0;
-
- r = vgic_attr_regs_access(dev, attr, &reg, false);
- if (r)
- return r;
- r = put_user(reg, uaddr);
- break;
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
@@ -2366,58 +1648,12 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return r;
}
-static int vgic_has_attr_regs(const struct mmio_range *ranges,
- phys_addr_t offset)
-{
- struct kvm_exit_mmio dev_attr_mmio;
-
- dev_attr_mmio.len = 4;
- if (find_matching_range(ranges, &dev_attr_mmio, offset))
- return 0;
- else
- return -ENXIO;
-}
-
-static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- phys_addr_t offset;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_dist_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_cpu_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
- return 0;
- }
- return -ENXIO;
-}
-
-static void vgic_destroy(struct kvm_device *dev)
+void vgic_destroy(struct kvm_device *dev)
{
kfree(dev);
}
-static int vgic_create(struct kvm_device *dev, u32 type)
+int vgic_create(struct kvm_device *dev, u32 type)
{
return kvm_vgic_create(dev->kvm, type);
}
-
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
- .name = "kvm-arm-vgic",
- .create = vgic_create,
- .destroy = vgic_destroy,
- .set_attr = vgic_set_attr,
- .get_attr = vgic_get_attr,
- .has_attr = vgic_has_attr,
-};
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
new file mode 100644
index 0000000..e900aeb
--- /dev/null
+++ b/virt/kvm/arm/vgic.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012-2014 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from virt/kvm/arm/vgic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define VGIC_ADDR_UNDEF (-1)
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
+#define IMPLEMENTER_ARM 0x43b
+
+#define ACCESS_READ_VALUE (1 << 0)
+#define ACCESS_READ_RAZ (0 << 0)
+#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED (0 << 1)
+#define ACCESS_WRITE_SETBIT (1 << 1)
+#define ACCESS_WRITE_CLEARBIT (2 << 1)
+#define ACCESS_WRITE_VALUE (3 << 1)
+#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
+
+void vgic_update_state(struct kvm *kvm);
+int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs);
+
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
+
+void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq);
+void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val);
+
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
+
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode);
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+static inline
+u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
+{
+ return *((u32 *)mmio->data) & mask;
+}
+
+static inline
+void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
+{
+ *((u32 *)mmio->data) = value & mask;
+}
+
+struct mmio_range {
+ phys_addr_t base;
+ unsigned long len;
+ int bits_per_irq;
+ bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+};
+
+#define IS_IN_RANGE(addr, alen, base, len) \
+ (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
+
+const
+struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct mmio_range *ranges,
+ unsigned long mmio_base);
+
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+void vgic_kick_vcpus(struct kvm *kvm);
+
+int vgic_create(struct kvm_device *dev, u32 type);
+void vgic_destroy(struct kvm_device *dev);
+
+int vgic_has_attr_regs(const struct mmio_range *ranges, phys_addr_t offset);
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+
+bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type);
--
1.7.9.5
* [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
2014-06-19 9:45 ` [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c Andre Przywara
@ 2014-06-19 21:27 ` Chalamarla, Tirumalesh
2014-06-20 8:43 ` Andre Przywara
0 siblings, 1 reply; 25+ messages in thread
From: Chalamarla, Tirumalesh @ 2014-06-19 21:27 UTC (permalink / raw)
To: linux-arm-kernel
OK, what is the need for a separate file? Why can't we use vgic-v2.c itself? Is there any reason I am missing?
-----Original Message-----
From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces@lists.cs.columbia.edu] On Behalf Of Andre Przywara
Sent: Thursday, June 19, 2014 2:46 AM
To: linux-arm-kernel@lists.infradead.org; kvmarm@lists.cs.columbia.edu; kvm@vger.kernel.org
Cc: christoffer.dall@linaro.org
Subject: [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
vgic.c is currently a mixture of generic vGIC emulation code and
functions specific to emulating a GICv2. To ease the addition of
GICv3, split off the strictly v2-specific parts into a new file
vgic-v2-emul.c.
A new header file vgic.h is introduced to allow separation and later
sharing of functions.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/kvm/Makefile | 1 +
arch/arm64/kvm/Makefile | 1 +
virt/kvm/arm/vgic-v2-emul.c | 795 ++++++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 856 +++----------------------------------------
virt/kvm/arm/vgic.h | 113 ++++++
5 files changed, 956 insertions(+), 810 deletions(-)
create mode 100644 virt/kvm/arm/vgic-v2-emul.c
create mode 100644 virt/kvm/arm/vgic.h
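Before the diff itself, it may help to sketch the dispatch pattern this split relies on: vgic.c stops calling GICv2 helpers directly and instead goes through per-VM function pointers, which vgic_v2_init_emulation_ops() below fills in for the GICv2 case. A minimal sketch of that pattern (the struct layout and the dispatch helper here are simplified stand-ins for illustration; the kernel wraps the vm_ops dereference in a vgic_vm_op() macro):

struct vgic_vm_ops {
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_run *run,
			    struct kvm_exit_mmio *mmio);
	bool (*queue_sgi)(struct kvm_vcpu *vcpu, int irq);
	void (*unqueue_sgi)(struct kvm_vcpu *vcpu, int irq, int source);
	int (*vgic_init)(struct kvm *kvm, const struct vgic_params *params);
};

/* Generic code dispatches through the ops installed for this VM and
 * never needs to know which GIC model is being emulated: */
static bool dispatch_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				 struct kvm_exit_mmio *mmio)
{
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}

Adding GICv3 emulation later then only needs a new file that installs its own set of ops, mirroring vgic_v2_init_emulation_ops() at the end of the new vgic-v2-emul.c.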
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f7057ed..443b8be 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -22,4 +22,5 @@ obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 32a0961..f241db6 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,6 +20,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
new file mode 100644
index 0000000..ba5f873
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -0,0 +1,795 @@
+/*
+ * Contains GICv2 specific emulation code, was in vgic.c before.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define GICC_ARCH_VERSION_V2 0x2
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+ return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case 0: /* GICD_CTLR */
+ reg = vcpu->kvm->arch.vgic.enabled;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = reg & 1;
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+
+ case 4: /* GICD_TYPER */
+ reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+ reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+
+ case 8: /* GICD_IIDR */
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+#define GICD_ITARGETSR_SIZE 32
+#define GICD_CPUTARGETS_BITS 8
+#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+ u32 val = 0;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+ val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
+
+ return val;
+}
+
+static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, c;
+ unsigned long *bmap;
+ u32 target;
+
+ irq -= VGIC_NR_PRIVATE_IRQS;
+
+ /*
+ * Pick the LSB in each byte. This ensures we target exactly
+ * one vcpu per IRQ. If the byte is null, assume we target
+ * CPU0.
+ */
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
+ int shift = i * GICD_CPUTARGETS_BITS;
+ target = ffs((val >> shift) & 0xffU);
+ target = target ? (target - 1) : 0;
+ dist->irq_spi_cpu[irq + i] = target;
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+ if (c == target)
+ set_bit(irq + i, bmap);
+ else
+ clear_bit(irq + i, bmap);
+ }
+ }
+}
+
+static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ /* We treat the banked interrupts targets as read-only */
+ if (offset < 32) {
+ u32 roreg = 1 << vcpu->vcpu_id;
+ roreg |= roreg << 8;
+ roreg |= roreg << 16;
+
+ vgic_reg_access(mmio, &roreg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 *reg;
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ u32 reg;
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vgic_dispatch_sgi(vcpu, reg);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+
+ return false;
+}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3) * 4;
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg = 0;
+
+ /* Copy source SGIs from distributor side */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
+ reg |= ((u32)sources) << (8 * (sgi - min_sgi));
+ }
+
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, bool set)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int sgi;
+ int min_sgi = (offset & ~0x3) * 4;
+ int max_sgi = min_sgi + 3;
+ int vcpu_id = vcpu->vcpu_id;
+ u32 reg;
+ bool updated = false;
+
+ reg = mmio_data_read(mmio, ~0);
+
+ /* Clear pending SGIs on the distributor */
+ for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+ u8 mask = reg >> (8 * (sgi - min_sgi));
+ u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
+ if (set) {
+ if ((*src & mask) != mask)
+ updated = true;
+ *src |= mask;
+ } else {
+ if (*src & mask)
+ updated = true;
+ *src &= ~mask;
+ }
+ }
+
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+
+ return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ if (!mmio->is_write)
+ return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+ else
+ return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
+/*
+ * I would have liked to use the kvm_bus_io_*() API instead, but it
+ * cannot cope with banked registers (only the VM pointer is passed
+ * around, and we need the vcpu). One of these days, someone please
+ * fix it!
+ */
+static const struct mmio_range vgic_dist_ranges[] = {
+ {
+ .base = GIC_DIST_CTRL,
+ .len = 12,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ .base = GIC_DIST_IGROUP,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ENABLE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg,
+ },
+ {
+ .base = GIC_DIST_ENABLE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg,
+ },
+ {
+ .base = GIC_DIST_PENDING_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_SET,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_ACTIVE_CLEAR,
+ .len = VGIC_MAX_IRQS / 8,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_DIST_PRI,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg,
+ },
+ {
+ .base = GIC_DIST_TARGET,
+ .len = VGIC_MAX_IRQS,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_target_reg,
+ },
+ {
+ .base = GIC_DIST_CONFIG,
+ .len = VGIC_MAX_IRQS / 4,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg,
+ },
+ {
+ .base = GIC_DIST_SOFTINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_sgi_reg,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_CLEAR,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_clear,
+ },
+ {
+ .base = GIC_DIST_SGI_PENDING_SET,
+ .len = VGIC_NR_SGIS,
+ .handle_mmio = handle_mmio_sgi_set,
+ },
+ {}
+};
+
+static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
+
+ if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
+ KVM_VGIC_V2_DIST_SIZE))
+ return false;
+
+ /* GICv2 does not support accesses wider than 32 bits */
+ if (mmio->len > 4) {
+ kvm_inject_dabt(vcpu, mmio->phys_addr);
+ return true;
+ }
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+}
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int nrcpus = atomic_read(&kvm->online_vcpus);
+ u8 target_cpus;
+ int sgi, mode, c, vcpu_id;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = reg & 0xf;
+ target_cpus = (reg >> 16) & 0xff;
+ mode = (reg >> 24) & 3;
+
+ switch (mode) {
+ case 0:
+ if (!target_cpus)
+ return;
+ break;
+
+ case 1:
+ target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+ break;
+
+ case 2:
+ target_cpus = 1 << vcpu_id;
+ break;
+ }
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (target_cpus & 1) {
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set(vcpu, sgi);
+ *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n",
+ sgi, vcpu_id, c);
+ }
+
+ target_cpus >>= 1;
+ }
+}
+
+static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long sources;
+ int vcpu_id = vcpu->vcpu_id;
+ int c;
+
+ sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
+
+ for_each_set_bit(c, &sources, dist->nr_cpus) {
+ if (vgic_queue_irq(vcpu, c, irq))
+ clear_bit(c, &sources);
+ }
+
+ *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
+
+ /*
+ * If the sources bitmap has been cleared it means that we
+ * could queue all the SGIs onto link registers (see the
+ * clear_bit above), and therefore we are done with them in
+ * our emulated gic and can get rid of them.
+ */
+ if (!sources) {
+ vgic_dist_irq_clear(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret, i;
+
+ dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ return -ENXIO;
+ }
+
+ ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ return ret;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ params->vcpu_base,
+ KVM_VGIC_V2_CPU_SIZE);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ return ret;
+ }
+
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ return 0;
+}
+
+static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
+}
+
+bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
+ dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
+ dist->vm_ops.vgic_init = vgic_v2_init;
+ return true;
+ }
+ return false;
+}
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ bool updated = false;
+ struct vgic_vmcr vmcr;
+ u32 *vmcr_field;
+ u32 reg;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (offset & ~0x3) {
+ case GIC_CPU_CTRL:
+ vmcr_field = &vmcr.ctlr;
+ break;
+ case GIC_CPU_PRIMASK:
+ vmcr_field = &vmcr.pmr;
+ break;
+ case GIC_CPU_BINPOINT:
+ vmcr_field = &vmcr.bpr;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ vmcr_field = &vmcr.abpr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (!mmio->is_write) {
+ reg = *vmcr_field;
+ mmio_data_write(mmio, ~0, reg);
+ } else {
+ reg = mmio_data_read(mmio, ~0);
+ if (reg != *vmcr_field) {
+ *vmcr_field = reg;
+ vgic_set_vmcr(vcpu, &vmcr);
+ updated = true;
+ }
+ }
+ return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+ return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+
+ if (mmio->is_write)
+ return false;
+
+ /* GICC_IIDR */
+ reg = (PRODUCT_ID_KVM << 20) |
+ (GICC_ARCH_VERSION_V2 << 16) |
+ (IMPLEMENTER_ARM << 0);
+ mmio_data_write(mmio, ~0, reg);
+ return false;
+}
+
+/*
+ * CPU Interface Register accesses - these are not accessed by the VM, but by
+ * user space for saving and restoring VGIC state.
+ */
+static const struct mmio_range vgic_cpu_ranges[] = {
+ {
+ .base = GIC_CPU_CTRL,
+ .len = 12,
+ .handle_mmio = handle_cpu_mmio_misc,
+ },
+ {
+ .base = GIC_CPU_ALIAS_BINPOINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_abpr,
+ },
+ {
+ .base = GIC_CPU_ACTIVEPRIO,
+ .len = 16,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GIC_CPU_IDENT,
+ .len = 4,
+ .handle_mmio = handle_cpu_mmio_ident,
+ },
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ const struct mmio_range *r = NULL, *ranges;
+ phys_addr_t offset;
+ int ret, cpuid, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ struct vgic_dist *vgic;
+ struct kvm_exit_mmio mmio;
+
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+ mutex_lock(&dev->kvm->lock);
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ vgic = &dev->kvm->arch.vgic;
+
+ mmio.len = 4;
+ mmio.is_write = is_write;
+ if (is_write)
+ mmio_data_write(&mmio, ~0, *reg);
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ mmio.phys_addr = vgic->vgic_dist_base + offset;
+ ranges = vgic_dist_ranges;
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ mmio.phys_addr = vgic->vgic_cpu_base + offset;
+ ranges = vgic_cpu_ranges;
+ break;
+ default:
+ BUG();
+ }
+ r = vgic_find_matching_range(ranges, &mmio, offset);
+
+ if (unlikely(!r || !r->handle_mmio)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+
+ spin_lock(&vgic->lock);
+
+ /*
+ * Ensure that no other VCPU is running by checking the vcpu->cpu
+ * field. If no other VCPUs are running we can safely access the VGIC
+ * state, because even if another VCPU is run after this point, that
+ * VCPU will not touch the vgic state, because it will block on
+ * getting the vgic->lock in kvm_vgic_sync_hwstate().
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (unlikely(tmp_vcpu->cpu != -1)) {
+ ret = -EBUSY;
+ goto out_vgic_unlock;
+ }
+ }
+
+ /*
+ * Move all pending IRQs from the LRs on all VCPUs so the pending
+ * state can be properly represented in the register state accessible
+ * through this API.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+ vgic_unqueue_irqs(tmp_vcpu);
+
+ offset -= r->base;
+ r->handle_mmio(vcpu, &mmio, offset);
+
+ if (!is_write)
+ *reg = mmio_data_read(&mmio, ~0);
+
+ ret = 0;
+out_vgic_unlock:
+ spin_unlock(&vgic->lock);
+out:
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ phys_addr_t offset;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_dist_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
+ return 0;
+ }
+ return -ENXIO;
+}
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (!ret)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+ return vgic_attr_regs_access(dev, attr, &reg, true);
+ }
+ return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg = 0;
+ int r;
+
+ r = vgic_get_common_attr(dev, attr);
+ if (!r)
+ return r;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ r = vgic_attr_regs_access(dev, attr, &reg, false);
+ if (r)
+ return r;
+ r = put_user(reg, uaddr);
+ break;
+ }
+
+ return r;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+ .name = "kvm-arm-vgic-v2",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v2_set_attr,
+ .get_attr = vgic_v2_get_attr,
+ .has_attr = vgic_v2_has_attr,
+};
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2a59dff..0140505 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -69,32 +69,14 @@
* interrupt line to be sampled again.
*/
-#define VGIC_ADDR_UNDEF (-1)
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+#include "vgic.h"
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
-#define IMPLEMENTER_ARM 0x43b
#define GICC_ARCH_VERSION_V2 0x2
-#define ACCESS_READ_VALUE (1 << 0)
-#define ACCESS_READ_RAZ (0 << 0)
-#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED (0 << 1)
-#define ACCESS_WRITE_SETBIT (1 << 1)
-#define ACCESS_WRITE_CLEARBIT (2 << 1)
-#define ACCESS_WRITE_VALUE (3 << 1)
-#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
-
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
-static void vgic_update_state(struct kvm *kvm);
-static void vgic_kick_vcpus(struct kvm *kvm);
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
@@ -122,8 +104,7 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
kfree(b->private);
}
-static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
- int cpuid, u32 offset)
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
offset >>= 2;
if (!offset)
@@ -141,8 +122,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}
-static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val)
{
unsigned long *reg;
@@ -164,7 +145,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
return x->private + cpuid;
}
-static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
return x->shared;
}
@@ -190,7 +171,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b)
kfree(b->private);
}
-static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
u32 *reg;
@@ -252,14 +233,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}
-static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}
-static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -275,7 +256,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
if (irq < VGIC_NR_PRIVATE_IRQS)
clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
@@ -284,16 +265,6 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
vcpu->arch.vgic_cpu.pending_shared);
}
-static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
- return *((u32 *)mmio->data) & mask;
-}
-
-static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
- *((u32 *)mmio->data) = value & mask;
-}
-
/**
* vgic_reg_access - access vgic register
* @mmio: pointer to the data describing the mmio access
@@ -305,7 +276,7 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
* modes defined for vgic register access
* (read,raz,write-ignored,setbit,clearbit,write)
*/
-static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
phys_addr_t offset, int mode)
{
int word_offset = (offset & 3) * 8;
@@ -355,42 +326,7 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
}
}
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- u32 word_offset = offset & 3;
-
- switch (offset & ~3) {
- case 0: /* GICD_CTLR */
- reg = vcpu->kvm->arch.vgic.enabled;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = reg & 1;
- vgic_update_state(vcpu->kvm);
- return true;
- }
- break;
-
- case 4: /* GICD_TYPER */
- reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
- reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
-
- case 8: /* GICD_IIDR */
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
- }
-
- return false;
-}
-
-static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
vgic_reg_access(mmio, NULL, offset,
@@ -398,8 +334,8 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
return false;
}
-static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
u32 *reg;
int mode = ACCESS_READ_VALUE | access;
@@ -420,24 +356,8 @@ static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
-}
-
-static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
+bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access)
{
u32 *reg;
int mode = ACCESS_READ_VALUE | access;
@@ -452,110 +372,6 @@ static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
- return false;
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-#define GICD_ITARGETSR_SIZE 32
-#define GICD_CPUTARGETS_BITS 8
-#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int i;
- u32 val = 0;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
-
- return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
- u32 target;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- /*
- * Pick the LSB in each byte. This ensures we target exactly
- * one vcpu per IRQ. If the byte is null, assume we target
- * CPU0.
- */
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
- int shift = i * GICD_CPUTARGETS_BITS;
- target = ffs((val >> shift) & 0xffU);
- target = target ? (target - 1) : 0;
- dist->irq_spi_cpu[irq + i] = target;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- if (c == target)
- set_bit(irq + i, bmap);
- else
- clear_bit(irq + i, bmap);
- }
- }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- /* We treat the banked interrupts targets as read-only */
- if (offset < 32) {
- u32 roreg = 1 << vcpu->vcpu_id;
- roreg |= roreg << 8;
- roreg |= roreg << 16;
-
- vgic_reg_access(mmio, &roreg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
static u32 vgic_cfg_expand(u16 val)
{
u32 res = 0;
@@ -591,8 +407,8 @@ static u16 vgic_cfg_compress(u32 val)
* LSB is always 0. As such, we only keep the upper bit, and use the
* two above functions to compress/expand the bits
*/
-static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
u32 val;
@@ -623,32 +439,6 @@ static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
return false;
}
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
-
- return vgic_handle_cfg_reg(reg, mmio, offset);
-}
-
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_dispatch_sgi(vcpu, reg);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -661,14 +451,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
-
-static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
-}
-static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int i;
@@ -715,196 +498,18 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}
}
-/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
-static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3) * 4;
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-
- /* Copy source SGIs from distributor side */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- int shift = 8 * (sgi - min_sgi);
- reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
- }
-
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, bool set)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3) * 4;
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
- bool updated = false;
-
- reg = mmio_data_read(mmio, ~0);
-
- /* Clear pending SGIs on the distributor */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 mask = reg >> (8 * (sgi - min_sgi));
- u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
- if (set) {
- if ((*src & mask) != mask)
- updated = true;
- *src |= mask;
- } else {
- if (*src & mask)
- updated = true;
- *src &= ~mask;
- }
- }
-
- if (updated)
- vgic_update_state(vcpu->kvm);
-
- return updated;
-}
-
-static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
-}
-
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
-}
-
-/*
- * I would have liked to use the kvm_bus_io_*() API instead, but it
- * cannot cope with banked registers (only the VM pointer is passed
- * around, and we need the vcpu). One of these days, someone please
- * fix it!
- */
-struct mmio_range {
- phys_addr_t base;
- unsigned long len;
- int bits_per_irq;
- bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-};
-
-static const struct mmio_range vgic_dist_ranges[] = {
- {
- .base = GIC_DIST_CTRL,
- .len = 12,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_misc,
- },
- {
- .base = GIC_DIST_IGROUP,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ENABLE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg,
- },
- {
- .base = GIC_DIST_ENABLE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg,
- },
- {
- .base = GIC_DIST_PENDING_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg,
- },
- {
- .base = GIC_DIST_PENDING_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ACTIVE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_PRI,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg,
- },
- {
- .base = GIC_DIST_TARGET,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_target_reg,
- },
- {
- .base = GIC_DIST_CONFIG,
- .len = VGIC_MAX_IRQS / 4,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg,
- },
- {
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
- .base = GIC_DIST_SGI_PENDING_CLEAR,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_clear,
- },
- {
- .base = GIC_DIST_SGI_PENDING_SET,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_set,
- },
- {}
-};
-
-static const
-struct mmio_range *find_matching_range(const struct mmio_range *ranges,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+const
+struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
{
const struct mmio_range *r = ranges;
-
while (r->len) {
if (offset >= r->base &&
(offset + mmio->len) <= (r->base + r->len))
return r;
r++;
}
-
return NULL;
}
@@ -976,7 +581,7 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
*
* returns true if the MMIO access could be performed
*/
-static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio,
const struct mmio_range *ranges,
unsigned long mmio_base)
@@ -987,7 +592,7 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long offset;
offset = mmio->phys_addr - mmio_base;
- range = find_matching_range(ranges, mmio, offset);
+ range = vgic_find_matching_range(ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -1013,27 +618,6 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
-#define IS_IN_RANGE(addr, alen, base, len) \
- (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
-
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
- if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
- KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* GICv2 does not support accesses wider than 32 bits */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
/**
* vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
* @vcpu: pointer to the vcpu performing the access
@@ -1052,52 +636,6 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);
}
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
-{
- return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
-}
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
-{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- u8 target_cpus;
- int sgi, mode, c, vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
-
- sgi = reg & 0xf;
- target_cpus = (reg >> 16) & 0xff;
- mode = (reg >> 24) & 3;
-
- switch (mode) {
- case 0:
- if (!target_cpus)
- return;
- break;
-
- case 1:
- target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
- break;
-
- case 2:
- target_cpus = 1 << vcpu_id;
- break;
- }
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (target_cpus & 1) {
- /* Flag the SGI as pending */
- vgic_dist_irq_set(vcpu, sgi);
- *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
- kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
- }
-
- target_cpus >>= 1;
- }
-}
-
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
@@ -1136,7 +674,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
* Update the interrupt state and determine which CPUs have pending
* interrupts. Must be called with distributor lock held.
*/
-static void vgic_update_state(struct kvm *kvm)
+void vgic_update_state(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
@@ -1197,12 +735,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
vgic_ops->disable_underflow(vcpu);
}
-static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->get_vmcr(vcpu, vmcr);
}
-static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
vgic_ops->set_vmcr(vcpu, vmcr);
}
@@ -1251,8 +789,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
/*
* Queue an interrupt to a CPU virtual interface. Return true on success,
* or false if it wasn't possible to queue it.
+ * sgi_source must be zero for any non-SGI interrupts.
*/
-static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -1301,37 +840,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return true;
}
-static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long sources;
- int vcpu_id = vcpu->vcpu_id;
- int c;
-
- sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
-
- for_each_set_bit(c, &sources, dist->nr_cpus) {
- if (vgic_queue_irq(vcpu, c, irq))
- clear_bit(c, &sources);
- }
-
- *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
-
- /*
- * If the sources bitmap has been cleared it means that we
- * could queue all the SGIs onto link registers (see the
- * clear_bit above), and therefore we are done with them in
- * our emulated gic and can get rid of them.
- */
- if (!sources) {
- vgic_dist_irq_clear(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
if (vgic_irq_is_active(vcpu, irq))
@@ -1523,7 +1031,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
-static void vgic_kick_vcpus(struct kvm *kvm)
+void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int c;
@@ -1807,7 +1315,7 @@ static void vgic_free_maps(struct vgic_dist *dist)
kfree(dist->irq_spi_target);
}
-static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
+int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
{
int ret, i;
@@ -1822,7 +1330,8 @@ static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
GFP_KERNEL);
dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS,
GFP_KERNEL);
- dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
+ dist->irq_spi_target = kcalloc(nr_cpus,
+ sizeof(*dist->irq_spi_target),
GFP_KERNEL);
if (!dist->irq_sgi_sources ||
!dist->irq_spi_cpu ||
@@ -1849,39 +1358,6 @@ void kvm_vgic_destroy(struct kvm *kvm)
vgic_free_maps(&kvm->arch.vgic);
}
-static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int ret, i;
-
- dist->nr_cpus = atomic_read(&kvm->online_vcpus);
-
- if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
- kvm_err("Need to set vgic distributor addresses first\n");
- return -ENXIO;
- }
-
- ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- return ret;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- params->vcpu_base,
- KVM_VGIC_V2_CPU_SIZE);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- return ret;
- }
-
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
-
- return 0;
-}
-
/**
* kvm_vgic_init - Initialize global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
@@ -1937,15 +1413,9 @@ out:
static bool init_emulation_ops(struct kvm *kvm, int type)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
- dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
- dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
- dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
- dist->vm_ops.vgic_init = vgic_v2_init;
- return true;
+ return vgic_v2_init_emulation_ops(kvm, type);
}
return false;
}
@@ -2086,185 +1556,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
return r;
}
-static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+int vgic_has_attr_regs(const struct mmio_range *ranges,
+ phys_addr_t offset)
{
- bool updated = false;
- struct vgic_vmcr vmcr;
- u32 *vmcr_field;
- u32 reg;
-
- vgic_get_vmcr(vcpu, &vmcr);
-
- switch (offset & ~0x3) {
- case GIC_CPU_CTRL:
- vmcr_field = &vmcr.ctlr;
- break;
- case GIC_CPU_PRIMASK:
- vmcr_field = &vmcr.pmr;
- break;
- case GIC_CPU_BINPOINT:
- vmcr_field = &vmcr.bpr;
- break;
- case GIC_CPU_ALIAS_BINPOINT:
- vmcr_field = &vmcr.abpr;
- break;
- default:
- BUG();
- }
-
- if (!mmio->is_write) {
- reg = *vmcr_field;
- mmio_data_write(mmio, ~0, reg);
- } else {
- reg = mmio_data_read(mmio, ~0);
- if (reg != *vmcr_field) {
- *vmcr_field = reg;
- vgic_set_vmcr(vcpu, &vmcr);
- updated = true;
- }
- }
- return updated;
-}
-
-static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
-}
-
-static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- if (mmio->is_write)
- return false;
-
- /* GICC_IIDR */
- reg = (PRODUCT_ID_KVM << 20) |
- (GICC_ARCH_VERSION_V2 << 16) |
- (IMPLEMENTER_ARM << 0);
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-/*
- * CPU Interface Register accesses - these are not accessed by the VM, but by
- * user space for saving and restoring VGIC state.
- */
-static const struct mmio_range vgic_cpu_ranges[] = {
- {
- .base = GIC_CPU_CTRL,
- .len = 12,
- .handle_mmio = handle_cpu_mmio_misc,
- },
- {
- .base = GIC_CPU_ALIAS_BINPOINT,
- .len = 4,
- .handle_mmio = handle_mmio_abpr,
- },
- {
- .base = GIC_CPU_ACTIVEPRIO,
- .len = 16,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_CPU_IDENT,
- .len = 4,
- .handle_mmio = handle_cpu_mmio_ident,
- },
-};
-
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
- const struct mmio_range *r = NULL, *ranges;
- phys_addr_t offset;
- int ret, cpuid, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- struct vgic_dist *vgic;
- struct kvm_exit_mmio mmio;
-
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-
- mutex_lock(&dev->kvm->lock);
-
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
- goto out;
- }
-
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- vgic = &dev->kvm->arch.vgic;
-
- mmio.len = 4;
- mmio.is_write = is_write;
- if (is_write)
- mmio_data_write(&mmio, ~0, *reg);
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- mmio.phys_addr = vgic->vgic_dist_base + offset;
- ranges = vgic_dist_ranges;
- break;
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- mmio.phys_addr = vgic->vgic_cpu_base + offset;
- ranges = vgic_cpu_ranges;
- break;
- default:
- BUG();
- }
- r = find_matching_range(ranges, &mmio, offset);
-
- if (unlikely(!r || !r->handle_mmio)) {
- ret = -ENXIO;
- goto out;
- }
-
-
- spin_lock(&vgic->lock);
-
- /*
- * Ensure that no other VCPU is running by checking the vcpu->cpu
- * field. If no other VCPUs are running we can safely access the VGIC
- * state, because even if another VCPU is run after this point, that
- * VCPU will not touch the vgic state, because it will block on
- * getting the vgic->lock in kvm_vgic_sync_hwstate().
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (unlikely(tmp_vcpu->cpu != -1)) {
- ret = -EBUSY;
- goto out_vgic_unlock;
- }
- }
-
- /*
- * Move all pending IRQs from the LRs on all VCPUs so the pending
- * state can be properly represented in the register state accessible
- * through this API.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
- vgic_unqueue_irqs(tmp_vcpu);
-
- offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
-
- if (!is_write)
- *reg = mmio_data_read(&mmio, ~0);
+ struct kvm_exit_mmio dev_attr_mmio;
- ret = 0;
-out_vgic_unlock:
- spin_unlock(&vgic->lock);
-out:
- mutex_unlock(&dev->kvm->lock);
- return ret;
+ dev_attr_mmio.len = 4;
+ if (vgic_find_matching_range(ranges, &dev_attr_mmio, offset))
+ return 0;
+ else
+ return -ENXIO;
}
-static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
@@ -2281,16 +1585,6 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return (r == -ENODEV) ? -ENXIO : r;
}
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg;
-
- if (get_user(reg, uaddr))
- return -EFAULT;
-
- return vgic_attr_regs_access(dev, attr, &reg, true);
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 val;
@@ -2319,7 +1613,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -ENXIO;
}
-static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = -ENXIO;
@@ -2337,18 +1631,6 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -EFAULT;
break;
}
-
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg = 0;
-
- r = vgic_attr_regs_access(dev, attr, &reg, false);
- if (r)
- return r;
- r = put_user(reg, uaddr);
- break;
- }
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
@@ -2366,58 +1648,12 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return r;
}
-static int vgic_has_attr_regs(const struct mmio_range *ranges,
- phys_addr_t offset)
-{
- struct kvm_exit_mmio dev_attr_mmio;
-
- dev_attr_mmio.len = 4;
- if (find_matching_range(ranges, &dev_attr_mmio, offset))
- return 0;
- else
- return -ENXIO;
-}
-
-static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- phys_addr_t offset;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_dist_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_cpu_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
- return 0;
- }
- return -ENXIO;
-}
-
-static void vgic_destroy(struct kvm_device *dev)
+void vgic_destroy(struct kvm_device *dev)
{
kfree(dev);
}
-static int vgic_create(struct kvm_device *dev, u32 type)
+int vgic_create(struct kvm_device *dev, u32 type)
{
return kvm_vgic_create(dev->kvm, type);
}
-
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
- .name = "kvm-arm-vgic",
- .create = vgic_create,
- .destroy = vgic_destroy,
- .set_attr = vgic_set_attr,
- .get_attr = vgic_get_attr,
- .has_attr = vgic_has_attr,
-};
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
new file mode 100644
index 0000000..e900aeb
--- /dev/null
+++ b/virt/kvm/arm/vgic.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012-2014 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from virt/kvm/arm/vgic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define VGIC_ADDR_UNDEF (-1)
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
+#define IMPLEMENTER_ARM 0x43b
+
+#define ACCESS_READ_VALUE (1 << 0)
+#define ACCESS_READ_RAZ (0 << 0)
+#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
+#define ACCESS_WRITE_IGNORED (0 << 1)
+#define ACCESS_WRITE_SETBIT (1 << 1)
+#define ACCESS_WRITE_CLEARBIT (2 << 1)
+#define ACCESS_WRITE_VALUE (3 << 1)
+#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
+unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
+
+void vgic_update_state(struct kvm *kvm);
+int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs);
+
+u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
+u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
+
+void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq);
+void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
+void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+ int irq, int val);
+
+void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+
+bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
+void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
+
+void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
+ phys_addr_t offset, int mode);
+bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+static inline
+u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
+{
+ return *((u32 *)mmio->data) & mask;
+}
+
+static inline
+void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
+{
+ *((u32 *)mmio->data) = value & mask;
+}
+
+struct mmio_range {
+ phys_addr_t base;
+ unsigned long len;
+ int bits_per_irq;
+ bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+};
+
+#define IS_IN_RANGE(addr, alen, base, len) \
+ (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
+
+const
+struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio,
+ const struct mmio_range *ranges,
+ unsigned long mmio_base);
+
+bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id, int access);
+
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+void vgic_kick_vcpus(struct kvm *kvm);
+
+int vgic_create(struct kvm_device *dev, u32 type);
+void vgic_destroy(struct kvm_device *dev);
+
+int vgic_has_attr_regs(const struct mmio_range *ranges, phys_addr_t offset);
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+
+bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type);
--
1.7.9.5
* [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
2014-06-19 21:27 ` Chalamarla, Tirumalesh
@ 2014-06-20 8:43 ` Andre Przywara
0 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-20 8:43 UTC (permalink / raw)
To: linux-arm-kernel
Hi,
On 19/06/14 22:27, Chalamarla, Tirumalesh wrote:
> Ok, what is the need for a separate file? Why can't we use vgic-v2.c itself? Is there any reason I am missing?
Because this is the guest-facing side of it, while vgic-v2.c contains
the host-facing bits and pieces. I think these are totally different domains.
Besides that, we support GICv2 emulation on some GICv3 hosts, so I think
it makes more sense to split it that way.
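To illustrate the boundary: the generic code only reaches the v2-specific
side through per-VM function pointers. Here is a minimal sketch of that
indirection (the struct fields and the vgic_handle_mmio() wrapper follow
the patch below; the vgic_vm_op() helper is an assumption on my side,
since its definition is not visible in these hunks):

struct vgic_vm_ops {
	bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
			       struct kvm_exit_mmio *);
	bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
	void	(*unqueue_sgi)(struct kvm_vcpu *, int irq, int source);
	int	(*vgic_init)(struct kvm *, const struct vgic_params *);
};

/* assumed helper, mirroring how vgic.c invokes the hooks */
#define vgic_vm_op(kvm, fn)	((kvm)->arch.vgic.vm_ops.fn)

/* generic vgic.c dispatches without naming any GICv2 symbol: */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	return vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);
}

vgic-v2-emul.c fills these pointers in vgic_v2_init_emulation_ops(), so a
later vgic-v3-emul.c only needs to provide its own set.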
Regards,
Andre.
>
> -----Original Message-----
> From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces@lists.cs.columbia.edu] On Behalf Of Andre Przywara
> Sent: Thursday, June 19, 2014 2:46 AM
> To: linux-arm-kernel@lists.infradead.org; kvmarm@lists.cs.columbia.edu; kvm@vger.kernel.org
> Cc: christoffer.dall@linaro.org
> Subject: [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
>
> vgic.c is currently a mixture of generic vGIC emulation code and
> functions specific to emulating a GICv2. To ease the addition of
> GICv3, split off the strictly v2-specific parts into a new file
> vgic-v2-emul.c.
> A new header file vgic.h is introduced to allow separation and later
> sharing of functions.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
> arch/arm/kvm/Makefile | 1 +
> arch/arm64/kvm/Makefile | 1 +
> virt/kvm/arm/vgic-v2-emul.c | 795 ++++++++++++++++++++++++++++++++++++++++
> virt/kvm/arm/vgic.c | 856 +++----------------------------------------
> virt/kvm/arm/vgic.h | 113 ++++++
> 5 files changed, 956 insertions(+), 810 deletions(-)
> create mode 100644 virt/kvm/arm/vgic-v2-emul.c
> create mode 100644 virt/kvm/arm/vgic.h
>
> diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
> index f7057ed..443b8be 100644
> --- a/arch/arm/kvm/Makefile
> +++ b/arch/arm/kvm/Makefile
> @@ -22,4 +22,5 @@ obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
> obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
> obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
> +obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
> obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index 32a0961..f241db6 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -20,6 +20,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
> kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
>
> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
> diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
> new file mode 100644
> index 0000000..ba5f873
> --- /dev/null
> +++ b/virt/kvm/arm/vgic-v2-emul.c
> @@ -0,0 +1,795 @@
> +/*
> + * Contains GICv2 specific emulation code, was in vgic.c before.
> + *
> + * Copyright (C) 2012 ARM Ltd.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/cpu.h>
> +#include <linux/kvm.h>
> +#include <linux/kvm_host.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/uaccess.h>
> +
> +#include <linux/irqchip/arm-gic.h>
> +
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_arm.h>
> +#include <asm/kvm_mmu.h>
> +
> +#include "vgic.h"
> +
> +#define GICC_ARCH_VERSION_V2 0x2
> +
> +static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
> +static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
> +{
> + return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
> +}
> +
> +static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +{
> + u32 reg;
> + u32 word_offset = offset & 3;
> +
> + switch (offset & ~3) {
> + case 0: /* GICD_CTLR */
> + reg = vcpu->kvm->arch.vgic.enabled;
> + vgic_reg_access(mmio, &reg, word_offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> + if (mmio->is_write) {
> + vcpu->kvm->arch.vgic.enabled = reg & 1;
> + vgic_update_state(vcpu->kvm);
> + return true;
> + }
> + break;
> +
> + case 4: /* GICD_TYPER */
> + reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
> + reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
> + vgic_reg_access(mmio, &reg, word_offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> + break;
> +
> + case 8: /* GICD_IIDR */
> + reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
> + vgic_reg_access(mmio, &reg, word_offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> + break;
> + }
> +
> + return false;
> +}
> +
> +static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
> + vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
> +}
> +
> +static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
> + vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
> +}
> +
> +static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
> + vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
> +}
> +
> +static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
> + vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
> +}
> +
> +static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + u32 *reg;
> +
> + reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
> + vcpu->vcpu_id, offset);
> + vgic_reg_access(mmio, reg, offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> + return false;
> +}
> +
> +#define GICD_ITARGETSR_SIZE 32
> +#define GICD_CPUTARGETS_BITS 8
> +#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
> +static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
> +{
> + struct vgic_dist *dist = &kvm->arch.vgic;
> + int i;
> + u32 val = 0;
> +
> + irq -= VGIC_NR_PRIVATE_IRQS;
> +
> + for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
> + val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
> +
> + return val;
> +}
> +
> +static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
> +{
> + struct vgic_dist *dist = &kvm->arch.vgic;
> + struct kvm_vcpu *vcpu;
> + int i, c;
> + unsigned long *bmap;
> + u32 target;
> +
> + irq -= VGIC_NR_PRIVATE_IRQS;
> +
> + /*
> + * Pick the LSB in each byte. This ensures we target exactly
> + * one vcpu per IRQ. If the byte is null, assume we target
> + * CPU0.
> + */
> + for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
> + int shift = i * GICD_CPUTARGETS_BITS;
> + target = ffs((val >> shift) & 0xffU);
> + target = target ? (target - 1) : 0;
> + dist->irq_spi_cpu[irq + i] = target;
> + kvm_for_each_vcpu(c, vcpu, kvm) {
> + bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
> + if (c == target)
> + set_bit(irq + i, bmap);
> + else
> + clear_bit(irq + i, bmap);
> + }
> + }
> +}
> +
> +static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + u32 reg;
> +
> + /* We treat the banked interrupts targets as read-only */
> + if (offset < 32) {
> + u32 roreg = 1 << vcpu->vcpu_id;
> + roreg |= roreg << 8;
> + roreg |= roreg << 16;
> +
> + vgic_reg_access(mmio, &roreg, offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> + return false;
> + }
> +
> + reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
> + vgic_reg_access(mmio, &reg, offset,
> + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> + if (mmio->is_write) {
> + vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
> + vgic_update_state(vcpu->kvm);
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +{
> + u32 *reg;
> +
> + reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
> + vcpu->vcpu_id, offset >> 1);
> +
> + return vgic_handle_cfg_reg(reg, mmio, offset);
> +}
> +
> +static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +{
> + u32 reg;
> + vgic_reg_access(mmio, &reg, offset,
> + ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
> + if (mmio->is_write) {
> + vgic_dispatch_sgi(vcpu, reg);
> + vgic_update_state(vcpu->kvm);
> + return true;
> + }
> +
> + return false;
> +}
> +
> +/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
> +static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> + int sgi;
> + int min_sgi = (offset & ~0x3) * 4;
> + int max_sgi = min_sgi + 3;
> + int vcpu_id = vcpu->vcpu_id;
> + u32 reg = 0;
> +
> + /* Copy source SGIs from distributor side */
> + for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
> + u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
> + reg |= ((u32)sources) << (8 * (sgi - min_sgi));
> + }
> +
> + mmio_data_write(mmio, ~0, reg);
> + return false;
> +}
> +
> +static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset, bool set)
> +{
> + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> + int sgi;
> + int min_sgi = (offset & ~0x3) * 4;
> + int max_sgi = min_sgi + 3;
> + int vcpu_id = vcpu->vcpu_id;
> + u32 reg;
> + bool updated = false;
> +
> + reg = mmio_data_read(mmio, ~0);
> +
> + /* Clear pending SGIs on the distributor */
> + for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
> + u8 mask = reg >> (8 * (sgi - min_sgi));
> + u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
> + if (set) {
> + if ((*src & mask) != mask)
> + updated = true;
> + *src |= mask;
> + } else {
> + if (*src & mask)
> + updated = true;
> + *src &= ~mask;
> + }
> + }
> +
> + if (updated)
> + vgic_update_state(vcpu->kvm);
> +
> + return updated;
> +}
> +
> +static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + if (!mmio->is_write)
> + return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
> + else
> + return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
> +}
> +
> +static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + if (!mmio->is_write)
> + return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
> + else
> + return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
> +}
> +
> +/*
> + * I would have liked to use the kvm_bus_io_*() API instead, but it
> + * cannot cope with banked registers (only the VM pointer is passed
> + * around, and we need the vcpu). One of these days, someone please
> + * fix it!
> + */
> +static const struct mmio_range vgic_dist_ranges[] = {
> + {
> + .base = GIC_DIST_CTRL,
> + .len = 12,
> + .bits_per_irq = 0,
> + .handle_mmio = handle_mmio_misc,
> + },
> + {
> + .base = GIC_DIST_IGROUP,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_raz_wi,
> + },
> + {
> + .base = GIC_DIST_ENABLE_SET,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_set_enable_reg,
> + },
> + {
> + .base = GIC_DIST_ENABLE_CLEAR,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_clear_enable_reg,
> + },
> + {
> + .base = GIC_DIST_PENDING_SET,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_set_pending_reg,
> + },
> + {
> + .base = GIC_DIST_PENDING_CLEAR,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_clear_pending_reg,
> + },
> + {
> + .base = GIC_DIST_ACTIVE_SET,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_raz_wi,
> + },
> + {
> + .base = GIC_DIST_ACTIVE_CLEAR,
> + .len = VGIC_MAX_IRQS / 8,
> + .bits_per_irq = 1,
> + .handle_mmio = handle_mmio_raz_wi,
> + },
> + {
> + .base = GIC_DIST_PRI,
> + .len = VGIC_MAX_IRQS,
> + .bits_per_irq = 8,
> + .handle_mmio = handle_mmio_priority_reg,
> + },
> + {
> + .base = GIC_DIST_TARGET,
> + .len = VGIC_MAX_IRQS,
> + .bits_per_irq = 8,
> + .handle_mmio = handle_mmio_target_reg,
> + },
> + {
> + .base = GIC_DIST_CONFIG,
> + .len = VGIC_MAX_IRQS / 4,
> + .bits_per_irq = 2,
> + .handle_mmio = handle_mmio_cfg_reg,
> + },
> + {
> + .base = GIC_DIST_SOFTINT,
> + .len = 4,
> + .handle_mmio = handle_mmio_sgi_reg,
> + },
> + {
> + .base = GIC_DIST_SGI_PENDING_CLEAR,
> + .len = VGIC_NR_SGIS,
> + .handle_mmio = handle_mmio_sgi_clear,
> + },
> + {
> + .base = GIC_DIST_SGI_PENDING_SET,
> + .len = VGIC_NR_SGIS,
> + .handle_mmio = handle_mmio_sgi_set,
> + },
> + {}
> +};
> +
> +static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
> + struct kvm_exit_mmio *mmio)
> +{
> + unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
> +
> + if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
> + KVM_VGIC_V2_DIST_SIZE))
> + return false;
> +
> + /* GICv2 does not support accesses wider than 32 bits */
> + if (mmio->len > 4) {
> + kvm_inject_dabt(vcpu, mmio->phys_addr);
> + return true;
> + }
> +
> + return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
> +}
> +
> +static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + struct vgic_dist *dist = &kvm->arch.vgic;
> + int nrcpus = atomic_read(&kvm->online_vcpus);
> + u8 target_cpus;
> + int sgi, mode, c, vcpu_id;
> +
> + vcpu_id = vcpu->vcpu_id;
> +
> + sgi = reg & 0xf;
> + target_cpus = (reg >> 16) & 0xff;
> + mode = (reg >> 24) & 3;
> +
> + switch (mode) {
> + case 0:
> + if (!target_cpus)
> + return;
> + break;
> +
> + case 1:
> + target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
> + break;
> +
> + case 2:
> + target_cpus = 1 << vcpu_id;
> + break;
> + }
> +
> + kvm_for_each_vcpu(c, vcpu, kvm) {
> + if (target_cpus & 1) {
> + /* Flag the SGI as pending */
> + vgic_dist_irq_set(vcpu, sgi);
> + *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
> + kvm_debug("SGI%d from CPU%d to CPU%d\n",
> + sgi, vcpu_id, c);
> + }
> +
> + target_cpus >>= 1;
> + }
> +}
> +
> +static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
> +{
> + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> + unsigned long sources;
> + int vcpu_id = vcpu->vcpu_id;
> + int c;
> +
> + sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
> +
> + for_each_set_bit(c, &sources, dist->nr_cpus) {
> + if (vgic_queue_irq(vcpu, c, irq))
> + clear_bit(c, &sources);
> + }
> +
> + *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
> +
> + /*
> + * If the sources bitmap has been cleared it means that we
> + * could queue all the SGIs onto link registers (see the
> + * clear_bit above), and therefore we are done with them in
> + * our emulated gic and can get rid of them.
> + */
> + if (!sources) {
> + vgic_dist_irq_clear(vcpu, irq);
> + vgic_cpu_irq_clear(vcpu, irq);
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
> +{
> + struct vgic_dist *dist = &kvm->arch.vgic;
> + int ret, i;
> +
> + dist->nr_cpus = atomic_read(&kvm->online_vcpus);
> +
> + if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
> + IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
> + kvm_err("Need to set vgic distributor addresses first\n");
> + return -ENXIO;
> + }
> +
> + ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
> + if (ret) {
> + kvm_err("Unable to allocate maps\n");
> + return ret;
> + }
> +
> + ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
> + params->vcpu_base,
> + KVM_VGIC_V2_CPU_SIZE);
> + if (ret) {
> + kvm_err("Unable to remap VGIC CPU to VCPU\n");
> + return ret;
> + }
> +
> + for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
> + vgic_set_target_reg(kvm, 0, i);
> +
> + return 0;
> +}
> +
> +static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
> +{
> + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> +
> + *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
> +}
> +
> +bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type)
> +{
> + struct vgic_dist *dist = &kvm->arch.vgic;
> +
> + switch (type) {
> + case KVM_DEV_TYPE_ARM_VGIC_V2:
> + dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
> + dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
> + dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
> + dist->vm_ops.vgic_init = vgic_v2_init;
> + return true;
> + }
> + return false;
> +}
> +
> +static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +{
> + bool updated = false;
> + struct vgic_vmcr vmcr;
> + u32 *vmcr_field;
> + u32 reg;
> +
> + vgic_get_vmcr(vcpu, &vmcr);
> +
> + switch (offset & ~0x3) {
> + case GIC_CPU_CTRL:
> + vmcr_field = &vmcr.ctlr;
> + break;
> + case GIC_CPU_PRIMASK:
> + vmcr_field = &vmcr.pmr;
> + break;
> + case GIC_CPU_BINPOINT:
> + vmcr_field = &vmcr.bpr;
> + break;
> + case GIC_CPU_ALIAS_BINPOINT:
> + vmcr_field = &vmcr.abpr;
> + break;
> + default:
> + BUG();
> + }
> +
> + if (!mmio->is_write) {
> + reg = *vmcr_field;
> + mmio_data_write(mmio, ~0, reg);
> + } else {
> + reg = mmio_data_read(mmio, ~0);
> + if (reg != *vmcr_field) {
> + *vmcr_field = reg;
> + vgic_set_vmcr(vcpu, &vmcr);
> + updated = true;
> + }
> + }
> + return updated;
> +}
> +
> +static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +{
> + return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
> +}
> +
> +static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> +{
> + u32 reg;
> +
> + if (mmio->is_write)
> + return false;
> +
> + /* GICC_IIDR */
> + reg = (PRODUCT_ID_KVM << 20) |
> + (GICC_ARCH_VERSION_V2 << 16) |
> + (IMPLEMENTER_ARM << 0);
> + mmio_data_write(mmio, ~0, reg);
> + return false;
> +}
> +
> +/*
> + * CPU Interface Register accesses - these are not accessed by the VM, but by
> + * user space for saving and restoring VGIC state.
> + */
> +static const struct mmio_range vgic_cpu_ranges[] = {
> + {
> + .base = GIC_CPU_CTRL,
> + .len = 12,
> + .handle_mmio = handle_cpu_mmio_misc,
> + },
> + {
> + .base = GIC_CPU_ALIAS_BINPOINT,
> + .len = 4,
> + .handle_mmio = handle_mmio_abpr,
> + },
> + {
> + .base = GIC_CPU_ACTIVEPRIO,
> + .len = 16,
> + .handle_mmio = handle_mmio_raz_wi,
> + },
> + {
> + .base = GIC_CPU_IDENT,
> + .len = 4,
> + .handle_mmio = handle_cpu_mmio_ident,
> + },
> +};
> +
> +static int vgic_attr_regs_access(struct kvm_device *dev,
> + struct kvm_device_attr *attr,
> + u32 *reg, bool is_write)
> +{
> + const struct mmio_range *r = NULL, *ranges;
> + phys_addr_t offset;
> + int ret, cpuid, c;
> + struct kvm_vcpu *vcpu, *tmp_vcpu;
> + struct vgic_dist *vgic;
> + struct kvm_exit_mmio mmio;
> +
> + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> + KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> +
> + mutex_lock(&dev->kvm->lock);
> +
> + if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
> + ret = -EINVAL;
> + goto out;
> + }
> +
> + vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> + vgic = &dev->kvm->arch.vgic;
> +
> + mmio.len = 4;
> + mmio.is_write = is_write;
> + if (is_write)
> + mmio_data_write(&mmio, ~0, *reg);
> + switch (attr->group) {
> + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> + mmio.phys_addr = vgic->vgic_dist_base + offset;
> + ranges = vgic_dist_ranges;
> + break;
> + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> + mmio.phys_addr = vgic->vgic_cpu_base + offset;
> + ranges = vgic_cpu_ranges;
> + break;
> + default:
> + BUG();
> + }
> + r = vgic_find_matching_range(ranges, &mmio, offset);
> +
> + if (unlikely(!r || !r->handle_mmio)) {
> + ret = -ENXIO;
> + goto out;
> + }
> +
> +
> + spin_lock(&vgic->lock);
> +
> + /*
> + * Ensure that no other VCPU is running by checking the vcpu->cpu
> + * field. If no other VCPUs are running we can safely access the VGIC
> + * state, because even if another VCPU is run after this point, that
> + * VCPU will not touch the vgic state, because it will block on
> + * getting the vgic->lock in kvm_vgic_sync_hwstate().
> + */
> + kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
> + if (unlikely(tmp_vcpu->cpu != -1)) {
> + ret = -EBUSY;
> + goto out_vgic_unlock;
> + }
> + }
> +
> + /*
> + * Move all pending IRQs from the LRs on all VCPUs so the pending
> + * state can be properly represented in the register state accessible
> + * through this API.
> + */
> + kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
> + vgic_unqueue_irqs(tmp_vcpu);
> +
> + offset -= r->base;
> + r->handle_mmio(vcpu, &mmio, offset);
> +
> + if (!is_write)
> + *reg = mmio_data_read(&mmio, ~0);
> +
> + ret = 0;
> +out_vgic_unlock:
> + spin_unlock(&vgic->lock);
> +out:
> + mutex_unlock(&dev->kvm->lock);
> + return ret;
> +}
> +
> +static int vgic_v2_has_attr(struct kvm_device *dev,
> + struct kvm_device_attr *attr)
> +{
> + phys_addr_t offset;
> +
> + switch (attr->group) {
> + case KVM_DEV_ARM_VGIC_GRP_ADDR:
> + switch (attr->attr) {
> + case KVM_VGIC_V2_ADDR_TYPE_DIST:
> + case KVM_VGIC_V2_ADDR_TYPE_CPU:
> + return 0;
> + }
> + break;
> + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> + return vgic_has_attr_regs(vgic_dist_ranges, offset);
> + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> + return vgic_has_attr_regs(vgic_cpu_ranges, offset);
> + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
> + case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
> + return 0;
> + }
> + return -ENXIO;
> +}
> +
> +static int vgic_v2_set_attr(struct kvm_device *dev,
> + struct kvm_device_attr *attr)
> +{
> + int ret;
> + u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> + u32 reg;
> +
> + ret = vgic_set_common_attr(dev, attr);
> + if (!ret)
> + return ret;
> +
> + switch (attr->group) {
> + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> + if (get_user(reg, uaddr))
> + return -EFAULT;
> + return vgic_attr_regs_access(dev, attr, &reg, true);
> + }
> + return -ENXIO;
> +}
> +
> +static int vgic_v2_get_attr(struct kvm_device *dev,
> + struct kvm_device_attr *attr)
> +{
> + u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> + u32 reg = 0;
> + int r;
> +
> + r = vgic_get_common_attr(dev, attr);
> + if (!r)
> + return r;
> +
> + switch (attr->group) {
> + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> + r = vgic_attr_regs_access(dev, attr, &reg, false);
> + if (r)
> + return r;
> + r = put_user(reg, uaddr);
> + break;
> + }
> +
> + return r;
> +}
> +
> +struct kvm_device_ops kvm_arm_vgic_v2_ops = {
> + .name = "kvm-arm-vgic-v2",
> + .create = vgic_create,
> + .destroy = vgic_destroy,
> + .set_attr = vgic_v2_set_attr,
> + .get_attr = vgic_v2_get_attr,
> + .has_attr = vgic_v2_has_attr,
> +};
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 2a59dff..0140505 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -69,32 +69,14 @@
> * interrupt line to be sampled again.
> */
>
> -#define VGIC_ADDR_UNDEF (-1)
> -#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
> +#include "vgic.h"
>
> -#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
> -#define IMPLEMENTER_ARM 0x43b
> #define GICC_ARCH_VERSION_V2 0x2
>
> -#define ACCESS_READ_VALUE (1 << 0)
> -#define ACCESS_READ_RAZ (0 << 0)
> -#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
> -#define ACCESS_WRITE_IGNORED (0 << 1)
> -#define ACCESS_WRITE_SETBIT (1 << 1)
> -#define ACCESS_WRITE_CLEARBIT (2 << 1)
> -#define ACCESS_WRITE_VALUE (3 << 1)
> -#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
> -
> static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
> -static void vgic_update_state(struct kvm *kvm);
> -static void vgic_kick_vcpus(struct kvm *kvm);
> -static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
> -static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
> static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
> static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
> -static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> -static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>
> static const struct vgic_ops *vgic_ops;
> static const struct vgic_params *vgic;
> @@ -122,8 +104,7 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
> kfree(b->private);
> }
>
> -static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
> - int cpuid, u32 offset)
> +u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
> {
> offset >>= 2;
> if (!offset)
> @@ -141,8 +122,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
> return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
> }
>
> -static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
> - int irq, int val)
> +void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
> + int irq, int val)
> {
> unsigned long *reg;
>
> @@ -164,7 +145,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
> return x->private + cpuid;
> }
>
> -static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
> +unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
> {
> return x->shared;
> }
> @@ -190,7 +171,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b)
> kfree(b->private);
> }
>
> -static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
> +u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
> {
> u32 *reg;
>
> @@ -252,14 +233,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
> return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
> }
>
> -static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
> +void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
> {
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>
> vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
> }
>
> -static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
> +void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
> {
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>
> @@ -275,7 +256,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
> vcpu->arch.vgic_cpu.pending_shared);
> }
>
> -static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
> +void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
> {
> if (irq < VGIC_NR_PRIVATE_IRQS)
> clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
> @@ -284,16 +265,6 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
> vcpu->arch.vgic_cpu.pending_shared);
> }
>
> -static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
> -{
> - return *((u32 *)mmio->data) & mask;
> -}
> -
> -static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
> -{
> - *((u32 *)mmio->data) = value & mask;
> -}
> -
> /**
> * vgic_reg_access - access vgic register
> * @mmio: pointer to the data describing the mmio access
> @@ -305,7 +276,7 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
> * modes defined for vgic register access
> * (read,raz,write-ignored,setbit,clearbit,write)
> */
> -static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
> +void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
> phys_addr_t offset, int mode)
> {
> int word_offset = (offset & 3) * 8;
> @@ -355,42 +326,7 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
> }
> }
>
> -static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio, phys_addr_t offset)
> -{
> - u32 reg;
> - u32 word_offset = offset & 3;
> -
> - switch (offset & ~3) {
> - case 0: /* GICD_CTLR */
> - reg = vcpu->kvm->arch.vgic.enabled;
> - vgic_reg_access(mmio, &reg, word_offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> - if (mmio->is_write) {
> - vcpu->kvm->arch.vgic.enabled = reg & 1;
> - vgic_update_state(vcpu->kvm);
> - return true;
> - }
> - break;
> -
> - case 4: /* GICD_TYPER */
> - reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
> - reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
> - vgic_reg_access(mmio, &reg, word_offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> - break;
> -
> - case 8: /* GICD_IIDR */
> - reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
> - vgic_reg_access(mmio, &reg, word_offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> - break;
> - }
> -
> - return false;
> -}
> -
> -static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
> +bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
> struct kvm_exit_mmio *mmio, phys_addr_t offset)
> {
> vgic_reg_access(mmio, NULL, offset,
> @@ -398,8 +334,8 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
> return false;
> }
>
> -static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> - phys_addr_t offset, int vcpu_id, int access)
> +bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset, int vcpu_id, int access)
> {
> u32 *reg;
> int mode = ACCESS_READ_VALUE | access;
> @@ -420,24 +356,8 @@ static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> return false;
> }
>
> -static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
> - vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
> -}
> -
> -static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
> - vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
> -}
> -
> -static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> - phys_addr_t offset, int vcpu_id, int access)
> +bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset, int vcpu_id, int access)
> {
> u32 *reg;
> int mode = ACCESS_READ_VALUE | access;
> @@ -452,110 +372,6 @@ static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> return false;
> }
>
> -static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
> - vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
> -}
> -
> -static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
> - vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
> - return false;
> -}
> -
> -static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
> - vcpu->vcpu_id, offset);
> - vgic_reg_access(mmio, reg, offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> - return false;
> -}
> -
> -#define GICD_ITARGETSR_SIZE 32
> -#define GICD_CPUTARGETS_BITS 8
> -#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
> -static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
> -{
> - struct vgic_dist *dist = &kvm->arch.vgic;
> - int i;
> - u32 val = 0;
> -
> - irq -= VGIC_NR_PRIVATE_IRQS;
> -
> - for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
> - val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
> -
> - return val;
> -}
> -
> -static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
> -{
> - struct vgic_dist *dist = &kvm->arch.vgic;
> - struct kvm_vcpu *vcpu;
> - int i, c;
> - unsigned long *bmap;
> - u32 target;
> -
> - irq -= VGIC_NR_PRIVATE_IRQS;
> -
> - /*
> - * Pick the LSB in each byte. This ensures we target exactly
> - * one vcpu per IRQ. If the byte is null, assume we target
> - * CPU0.
> - */
> - for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
> - int shift = i * GICD_CPUTARGETS_BITS;
> - target = ffs((val >> shift) & 0xffU);
> - target = target ? (target - 1) : 0;
> - dist->irq_spi_cpu[irq + i] = target;
> - kvm_for_each_vcpu(c, vcpu, kvm) {
> - bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
> - if (c == target)
> - set_bit(irq + i, bmap);
> - else
> - clear_bit(irq + i, bmap);
> - }
> - }
> -}
> -
> -static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - u32 reg;
> -
> - /* We treat the banked interrupts targets as read-only */
> - if (offset < 32) {
> - u32 roreg = 1 << vcpu->vcpu_id;
> - roreg |= roreg << 8;
> - roreg |= roreg << 16;
> -
> - vgic_reg_access(mmio, &roreg, offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
> - return false;
> - }
> -
> - reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
> - vgic_reg_access(mmio, &reg, offset,
> - ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
> - if (mmio->is_write) {
> - vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
> - vgic_update_state(vcpu->kvm);
> - return true;
> - }
> -
> - return false;
> -}
> -
> static u32 vgic_cfg_expand(u16 val)
> {
> u32 res = 0;
> @@ -591,8 +407,8 @@ static u16 vgic_cfg_compress(u32 val)
> * LSB is always 0. As such, we only keep the upper bit, and use the
> * two above functions to compress/expand the bits
> */
> -static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> +bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> {
> u32 val;
>
> @@ -623,32 +439,6 @@ static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
> return false;
> }
>
> -static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio, phys_addr_t offset)
> -{
> - u32 *reg;
> -
> - reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
> - vcpu->vcpu_id, offset >> 1);
> -
> - return vgic_handle_cfg_reg(reg, mmio, offset);
> -}
> -
> -static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio, phys_addr_t offset)
> -{
> - u32 reg;
> - vgic_reg_access(mmio, &reg, offset,
> - ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
> - if (mmio->is_write) {
> - vgic_dispatch_sgi(vcpu, reg);
> - vgic_update_state(vcpu->kvm);
> - return true;
> - }
> -
> - return false;
> -}
> -
> /**
> * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
> * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
> @@ -661,14 +451,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
> * to the distributor but the active state stays in the LRs, because we don't
> * track the active state on the distributor side.
> */
> -
> -static void vgic_v2_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
> -{
> - struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> -
> - *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
> -}
> -static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> +void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> int i;
> @@ -715,196 +498,18 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> }
> }
>
> -/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
> -static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> - int sgi;
> - int min_sgi = (offset & ~0x3) * 4;
> - int max_sgi = min_sgi + 3;
> - int vcpu_id = vcpu->vcpu_id;
> - u32 reg = 0;
> -
> - /* Copy source SGIs from distributor side */
> - for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
> - int shift = 8 * (sgi - min_sgi);
> - reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
> - }
> -
> - mmio_data_write(mmio, ~0, reg);
> - return false;
> -}
> -
> -static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset, bool set)
> -{
> - struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> - int sgi;
> - int min_sgi = (offset & ~0x3) * 4;
> - int max_sgi = min_sgi + 3;
> - int vcpu_id = vcpu->vcpu_id;
> - u32 reg;
> - bool updated = false;
> -
> - reg = mmio_data_read(mmio, ~0);
> -
> - /* Clear pending SGIs on the distributor */
> - for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
> - u8 mask = reg >> (8 * (sgi - min_sgi));
> - u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
> - if (set) {
> - if ((*src & mask) != mask)
> - updated = true;
> - *src |= mask;
> - } else {
> - if (*src & mask)
> - updated = true;
> - *src &= ~mask;
> - }
> - }
> -
> - if (updated)
> - vgic_update_state(vcpu->kvm);
> -
> - return updated;
> -}
> -
> -static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - if (!mmio->is_write)
> - return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
> - else
> - return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
> -}
> -
> -static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - if (!mmio->is_write)
> - return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
> - else
> - return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
> -}
> -
> -/*
> - * I would have liked to use the kvm_bus_io_*() API instead, but it
> - * cannot cope with banked registers (only the VM pointer is passed
> - * around, and we need the vcpu). One of these days, someone please
> - * fix it!
> - */
> -struct mmio_range {
> - phys_addr_t base;
> - unsigned long len;
> - int bits_per_irq;
> - bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
> - phys_addr_t offset);
> -};
> -
> -static const struct mmio_range vgic_dist_ranges[] = {
> - {
> - .base = GIC_DIST_CTRL,
> - .len = 12,
> - .bits_per_irq = 0,
> - .handle_mmio = handle_mmio_misc,
> - },
> - {
> - .base = GIC_DIST_IGROUP,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_raz_wi,
> - },
> - {
> - .base = GIC_DIST_ENABLE_SET,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_set_enable_reg,
> - },
> - {
> - .base = GIC_DIST_ENABLE_CLEAR,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_clear_enable_reg,
> - },
> - {
> - .base = GIC_DIST_PENDING_SET,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_set_pending_reg,
> - },
> - {
> - .base = GIC_DIST_PENDING_CLEAR,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_clear_pending_reg,
> - },
> - {
> - .base = GIC_DIST_ACTIVE_SET,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_raz_wi,
> - },
> - {
> - .base = GIC_DIST_ACTIVE_CLEAR,
> - .len = VGIC_MAX_IRQS / 8,
> - .bits_per_irq = 1,
> - .handle_mmio = handle_mmio_raz_wi,
> - },
> - {
> - .base = GIC_DIST_PRI,
> - .len = VGIC_MAX_IRQS,
> - .bits_per_irq = 8,
> - .handle_mmio = handle_mmio_priority_reg,
> - },
> - {
> - .base = GIC_DIST_TARGET,
> - .len = VGIC_MAX_IRQS,
> - .bits_per_irq = 8,
> - .handle_mmio = handle_mmio_target_reg,
> - },
> - {
> - .base = GIC_DIST_CONFIG,
> - .len = VGIC_MAX_IRQS / 4,
> - .bits_per_irq = 2,
> - .handle_mmio = handle_mmio_cfg_reg,
> - },
> - {
> - .base = GIC_DIST_SOFTINT,
> - .len = 4,
> - .handle_mmio = handle_mmio_sgi_reg,
> - },
> - {
> - .base = GIC_DIST_SGI_PENDING_CLEAR,
> - .len = VGIC_NR_SGIS,
> - .handle_mmio = handle_mmio_sgi_clear,
> - },
> - {
> - .base = GIC_DIST_SGI_PENDING_SET,
> - .len = VGIC_NR_SGIS,
> - .handle_mmio = handle_mmio_sgi_set,
> - },
> - {}
> -};
> -
> -static const
> -struct mmio_range *find_matching_range(const struct mmio_range *ranges,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> +const
> +struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset)
> {
> const struct mmio_range *r = ranges;
> -
> while (r->len) {
> if (offset >= r->base &&
> (offset + mmio->len) <= (r->base + r->len))
> return r;
> r++;
> }
> -
> return NULL;
> }
>
> @@ -976,7 +581,7 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
> *
> * returns true if the MMIO access could be performed
> */
> -static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> +bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> struct kvm_exit_mmio *mmio,
> const struct mmio_range *ranges,
> unsigned long mmio_base)
> @@ -987,7 +592,7 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> unsigned long offset;
>
> offset = mmio->phys_addr - mmio_base;
> - range = find_matching_range(ranges, mmio, offset);
> + range = vgic_find_matching_range(ranges, mmio, offset);
> if (unlikely(!range || !range->handle_mmio)) {
> pr_warn("Unhandled access %d %08llx %d\n",
> mmio->is_write, mmio->phys_addr, mmio->len);
> @@ -1013,27 +618,6 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> return true;
> }
>
> -#define IS_IN_RANGE(addr, alen, base, len) \
> - (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
> -
> -static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
> - struct kvm_exit_mmio *mmio)
> -{
> - unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
> -
> - if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, base,
> - KVM_VGIC_V2_DIST_SIZE))
> - return false;
> -
> - /* GICv2 does not support accesses wider than 32 bits */
> - if (mmio->len > 4) {
> - kvm_inject_dabt(vcpu, mmio->phys_addr);
> - return true;
> - }
> -
> - return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
> -}
> -
> /**
> * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
> * @vcpu: pointer to the vcpu performing the access
> @@ -1052,52 +636,6 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
> return vgic_vm_op(vcpu->kvm, handle_mmio)(vcpu, run, mmio);
> }
>
> -static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
> -{
> - return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
> -}
> -
> -static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
> -{
> - struct kvm *kvm = vcpu->kvm;
> - struct vgic_dist *dist = &kvm->arch.vgic;
> - int nrcpus = atomic_read(&kvm->online_vcpus);
> - u8 target_cpus;
> - int sgi, mode, c, vcpu_id;
> -
> - vcpu_id = vcpu->vcpu_id;
> -
> - sgi = reg & 0xf;
> - target_cpus = (reg >> 16) & 0xff;
> - mode = (reg >> 24) & 3;
> -
> - switch (mode) {
> - case 0:
> - if (!target_cpus)
> - return;
> - break;
> -
> - case 1:
> - target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
> - break;
> -
> - case 2:
> - target_cpus = 1 << vcpu_id;
> - break;
> - }
> -
> - kvm_for_each_vcpu(c, vcpu, kvm) {
> - if (target_cpus & 1) {
> - /* Flag the SGI as pending */
> - vgic_dist_irq_set(vcpu, sgi);
> - *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
> - kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
> - }
> -
> - target_cpus >>= 1;
> - }
> -}
> -
> static int vgic_nr_shared_irqs(struct vgic_dist *dist)
> {
> return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
> @@ -1136,7 +674,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
> * Update the interrupt state and determine which CPUs have pending
> * interrupts. Must be called with distributor lock held.
> */
> -static void vgic_update_state(struct kvm *kvm)
> +void vgic_update_state(struct kvm *kvm)
> {
> struct vgic_dist *dist = &kvm->arch.vgic;
> struct kvm_vcpu *vcpu;
> @@ -1197,12 +735,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
> vgic_ops->disable_underflow(vcpu);
> }
>
> -static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> +void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> {
> vgic_ops->get_vmcr(vcpu, vmcr);
> }
>
> -static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> +void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> {
> vgic_ops->set_vmcr(vcpu, vmcr);
> }
> @@ -1251,8 +789,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
> /*
> * Queue an interrupt to a CPU virtual interface. Return true on success,
> * or false if it wasn't possible to queue it.
> + * sgi_source must be zero for any non-SGI interrupts.
> */
> -static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> +bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> @@ -1301,37 +840,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> return true;
> }
>
> -static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
> -{
> - struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> - unsigned long sources;
> - int vcpu_id = vcpu->vcpu_id;
> - int c;
> -
> - sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
> -
> - for_each_set_bit(c, &sources, dist->nr_cpus) {
> - if (vgic_queue_irq(vcpu, c, irq))
> - clear_bit(c, &sources);
> - }
> -
> - *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
> -
> - /*
> - * If the sources bitmap has been cleared it means that we
> - * could queue all the SGIs onto link registers (see the
> - * clear_bit above), and therefore we are done with them in
> - * our emulated gic and can get rid of them.
> - */
> - if (!sources) {
> - vgic_dist_irq_clear(vcpu, irq);
> - vgic_cpu_irq_clear(vcpu, irq);
> - return true;
> - }
> -
> - return false;
> -}
> -
> static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
> {
> if (vgic_irq_is_active(vcpu, irq))
> @@ -1523,7 +1031,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
> return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
> }
>
> -static void vgic_kick_vcpus(struct kvm *kvm)
> +void vgic_kick_vcpus(struct kvm *kvm)
> {
> struct kvm_vcpu *vcpu;
> int c;
> @@ -1807,7 +1315,7 @@ static void vgic_free_maps(struct vgic_dist *dist)
> kfree(dist->irq_spi_target);
> }
>
> -static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
> +int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
> {
> int ret, i;
>
> @@ -1822,7 +1330,8 @@ static int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
> GFP_KERNEL);
> dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS,
> GFP_KERNEL);
> - dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
> + dist->irq_spi_target = kcalloc(nr_cpus,
> + sizeof(*dist->irq_spi_target),
> GFP_KERNEL);
> if (!dist->irq_sgi_sources ||
> !dist->irq_spi_cpu ||
> @@ -1849,39 +1358,6 @@ void kvm_vgic_destroy(struct kvm *kvm)
> vgic_free_maps(&kvm->arch.vgic);
> }
>
> -static int vgic_v2_init(struct kvm *kvm, const struct vgic_params *params)
> -{
> - struct vgic_dist *dist = &kvm->arch.vgic;
> - int ret, i;
> -
> - dist->nr_cpus = atomic_read(&kvm->online_vcpus);
> -
> - if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
> - IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
> - kvm_err("Need to set vgic distributor addresses first\n");
> - return -ENXIO;
> - }
> -
> - ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
> - if (ret) {
> - kvm_err("Unable to allocate maps\n");
> - return ret;
> - }
> -
> - ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
> - params->vcpu_base,
> - KVM_VGIC_V2_CPU_SIZE);
> - if (ret) {
> - kvm_err("Unable to remap VGIC CPU to VCPU\n");
> - return ret;
> - }
> -
> - for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
> - vgic_set_target_reg(kvm, 0, i);
> -
> - return 0;
> -}
> -
> /**
> * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
> * @kvm: pointer to the kvm struct
> @@ -1937,15 +1413,9 @@ out:
>
> static bool init_emulation_ops(struct kvm *kvm, int type)
> {
> - struct vgic_dist *dist = &kvm->arch.vgic;
> -
> switch (type) {
> case KVM_DEV_TYPE_ARM_VGIC_V2:
> - dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
> - dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
> - dist->vm_ops.unqueue_sgi = vgic_v2_unqueue_sgi;
> - dist->vm_ops.vgic_init = vgic_v2_init;
> - return true;
> + return vgic_v2_init_emulation_ops(kvm, type);
> }
> return false;
> }
> @@ -2086,185 +1556,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
> return r;
> }
>
> -static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio, phys_addr_t offset)
> +int vgic_has_attr_regs(const struct mmio_range *ranges,
> + phys_addr_t offset)
> {
> - bool updated = false;
> - struct vgic_vmcr vmcr;
> - u32 *vmcr_field;
> - u32 reg;
> -
> - vgic_get_vmcr(vcpu, &vmcr);
> -
> - switch (offset & ~0x3) {
> - case GIC_CPU_CTRL:
> - vmcr_field = &vmcr.ctlr;
> - break;
> - case GIC_CPU_PRIMASK:
> - vmcr_field = &vmcr.pmr;
> - break;
> - case GIC_CPU_BINPOINT:
> - vmcr_field = &vmcr.bpr;
> - break;
> - case GIC_CPU_ALIAS_BINPOINT:
> - vmcr_field = &vmcr.abpr;
> - break;
> - default:
> - BUG();
> - }
> -
> - if (!mmio->is_write) {
> - reg = *vmcr_field;
> - mmio_data_write(mmio, ~0, reg);
> - } else {
> - reg = mmio_data_read(mmio, ~0);
> - if (reg != *vmcr_field) {
> - *vmcr_field = reg;
> - vgic_set_vmcr(vcpu, &vmcr);
> - updated = true;
> - }
> - }
> - return updated;
> -}
> -
> -static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio, phys_addr_t offset)
> -{
> - return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
> -}
> -
> -static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
> - struct kvm_exit_mmio *mmio,
> - phys_addr_t offset)
> -{
> - u32 reg;
> -
> - if (mmio->is_write)
> - return false;
> -
> - /* GICC_IIDR */
> - reg = (PRODUCT_ID_KVM << 20) |
> - (GICC_ARCH_VERSION_V2 << 16) |
> - (IMPLEMENTER_ARM << 0);
> - mmio_data_write(mmio, ~0, reg);
> - return false;
> -}
> -
> -/*
> - * CPU Interface Register accesses - these are not accessed by the VM, but by
> - * user space for saving and restoring VGIC state.
> - */
> -static const struct mmio_range vgic_cpu_ranges[] = {
> - {
> - .base = GIC_CPU_CTRL,
> - .len = 12,
> - .handle_mmio = handle_cpu_mmio_misc,
> - },
> - {
> - .base = GIC_CPU_ALIAS_BINPOINT,
> - .len = 4,
> - .handle_mmio = handle_mmio_abpr,
> - },
> - {
> - .base = GIC_CPU_ACTIVEPRIO,
> - .len = 16,
> - .handle_mmio = handle_mmio_raz_wi,
> - },
> - {
> - .base = GIC_CPU_IDENT,
> - .len = 4,
> - .handle_mmio = handle_cpu_mmio_ident,
> - },
> -};
> -
> -static int vgic_attr_regs_access(struct kvm_device *dev,
> - struct kvm_device_attr *attr,
> - u32 *reg, bool is_write)
> -{
> - const struct mmio_range *r = NULL, *ranges;
> - phys_addr_t offset;
> - int ret, cpuid, c;
> - struct kvm_vcpu *vcpu, *tmp_vcpu;
> - struct vgic_dist *vgic;
> - struct kvm_exit_mmio mmio;
> -
> - offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> - cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> - KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> -
> - mutex_lock(&dev->kvm->lock);
> -
> - if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
> - ret = -EINVAL;
> - goto out;
> - }
> -
> - vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> - vgic = &dev->kvm->arch.vgic;
> -
> - mmio.len = 4;
> - mmio.is_write = is_write;
> - if (is_write)
> - mmio_data_write(&mmio, ~0, *reg);
> - switch (attr->group) {
> - case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - mmio.phys_addr = vgic->vgic_dist_base + offset;
> - ranges = vgic_dist_ranges;
> - break;
> - case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> - mmio.phys_addr = vgic->vgic_cpu_base + offset;
> - ranges = vgic_cpu_ranges;
> - break;
> - default:
> - BUG();
> - }
> - r = find_matching_range(ranges, &mmio, offset);
> -
> - if (unlikely(!r || !r->handle_mmio)) {
> - ret = -ENXIO;
> - goto out;
> - }
> -
> -
> - spin_lock(&vgic->lock);
> -
> - /*
> - * Ensure that no other VCPU is running by checking the vcpu->cpu
> - * field. If no other VCPUs are running we can safely access the VGIC
> - * state, because even if another VCPU is run after this point, that
> - * VCPU will not touch the vgic state, because it will block on
> - * getting the vgic->lock in kvm_vgic_sync_hwstate().
> - */
> - kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
> - if (unlikely(tmp_vcpu->cpu != -1)) {
> - ret = -EBUSY;
> - goto out_vgic_unlock;
> - }
> - }
> -
> - /*
> - * Move all pending IRQs from the LRs on all VCPUs so the pending
> - * state can be properly represented in the register state accessible
> - * through this API.
> - */
> - kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
> - vgic_unqueue_irqs(tmp_vcpu);
> -
> - offset -= r->base;
> - r->handle_mmio(vcpu, &mmio, offset);
> -
> - if (!is_write)
> - *reg = mmio_data_read(&mmio, ~0);
> + struct kvm_exit_mmio dev_attr_mmio;
>
> - ret = 0;
> -out_vgic_unlock:
> - spin_unlock(&vgic->lock);
> -out:
> - mutex_unlock(&dev->kvm->lock);
> - return ret;
> + dev_attr_mmio.len = 4;
> + if (vgic_find_matching_range(ranges, &dev_attr_mmio, offset))
> + return 0;
> + else
> + return -ENXIO;
> }
>
> -static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> +int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> {
> int r;
>
> @@ -2281,16 +1585,6 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> return (r == -ENODEV) ? -ENXIO : r;
> }
>
> - case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
> - u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> - u32 reg;
> -
> - if (get_user(reg, uaddr))
> - return -EFAULT;
> -
> - return vgic_attr_regs_access(dev, attr, &reg, true);
> - }
> case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
> u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> u32 val;
> @@ -2319,7 +1613,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> return -ENXIO;
> }
>
> -static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> +int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> {
> int r = -ENXIO;
>
> @@ -2337,18 +1631,6 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> return -EFAULT;
> break;
> }
> -
> - case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
> - u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> - u32 reg = 0;
> -
> - r = vgic_attr_regs_access(dev, attr, &reg, false);
> - if (r)
> - return r;
> - r = put_user(reg, uaddr);
> - break;
> - }
> case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
> u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
> @@ -2366,58 +1648,12 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> return r;
> }
>
> -static int vgic_has_attr_regs(const struct mmio_range *ranges,
> - phys_addr_t offset)
> -{
> - struct kvm_exit_mmio dev_attr_mmio;
> -
> - dev_attr_mmio.len = 4;
> - if (find_matching_range(ranges, &dev_attr_mmio, offset))
> - return 0;
> - else
> - return -ENXIO;
> -}
> -
> -static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
> -{
> - phys_addr_t offset;
> -
> - switch (attr->group) {
> - case KVM_DEV_ARM_VGIC_GRP_ADDR:
> - switch (attr->attr) {
> - case KVM_VGIC_V2_ADDR_TYPE_DIST:
> - case KVM_VGIC_V2_ADDR_TYPE_CPU:
> - return 0;
> - }
> - break;
> - case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> - return vgic_has_attr_regs(vgic_dist_ranges, offset);
> - case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> - offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> - return vgic_has_attr_regs(vgic_cpu_ranges, offset);
> - case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
> - case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
> - return 0;
> - }
> - return -ENXIO;
> -}
> -
> -static void vgic_destroy(struct kvm_device *dev)
> +void vgic_destroy(struct kvm_device *dev)
> {
> kfree(dev);
> }
>
> -static int vgic_create(struct kvm_device *dev, u32 type)
> +int vgic_create(struct kvm_device *dev, u32 type)
> {
> return kvm_vgic_create(dev->kvm, type);
> }
> -
> -struct kvm_device_ops kvm_arm_vgic_v2_ops = {
> - .name = "kvm-arm-vgic",
> - .create = vgic_create,
> - .destroy = vgic_destroy,
> - .set_attr = vgic_set_attr,
> - .get_attr = vgic_get_attr,
> - .has_attr = vgic_has_attr,
> -};
> diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
> new file mode 100644
> index 0000000..e900aeb
> --- /dev/null
> +++ b/virt/kvm/arm/vgic.h
> @@ -0,0 +1,113 @@
> +/*
> + * Copyright (C) 2012-2014 ARM Ltd.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * Derived from virt/kvm/arm/vgic.c
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#define VGIC_ADDR_UNDEF (-1)
> +#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
> +
> +#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
> +#define IMPLEMENTER_ARM 0x43b
> +
> +#define ACCESS_READ_VALUE (1 << 0)
> +#define ACCESS_READ_RAZ (0 << 0)
> +#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
> +#define ACCESS_WRITE_IGNORED (0 << 1)
> +#define ACCESS_WRITE_SETBIT (1 << 1)
> +#define ACCESS_WRITE_CLEARBIT (2 << 1)
> +#define ACCESS_WRITE_VALUE (3 << 1)
> +#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
> +
> +#define VCPU_NOT_ALLOCATED ((u8)-1)
> +
> +unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
> +
> +void vgic_update_state(struct kvm *kvm);
> +int vgic_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs);
> +
> +u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
> +u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
> +
> +void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq);
> +void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq);
> +void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
> +void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
> + int irq, int val);
> +
> +void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> +void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> +
> +bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
> +void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
> +
> +void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
> + phys_addr_t offset, int mode);
> +bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset);
> +
> +static inline
> +u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
> +{
> + return *((u32 *)mmio->data) & mask;
> +}
> +
> +static inline
> +void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
> +{
> + *((u32 *)mmio->data) = value & mask;
> +}
> +
> +struct mmio_range {
> + phys_addr_t base;
> + unsigned long len;
> + int bits_per_irq;
> + bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset);
> +};
> +
> +#define IS_IN_RANGE(addr, alen, base, len) \
> + (((addr) >= (base)) && (((addr) + (alen)) < ((base) + (len))))
> +
> +const
> +struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
> + struct kvm_exit_mmio *mmio,
> + phys_addr_t offset);
> +
> +bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
> + struct kvm_exit_mmio *mmio,
> + const struct mmio_range *ranges,
> + unsigned long mmio_base);
> +
> +bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset, int vcpu_id, int access);
> +
> +bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset, int vcpu_id, int access);
> +
> +bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
> + phys_addr_t offset);
> +
> +void vgic_kick_vcpus(struct kvm *kvm);
> +
> +int vgic_create(struct kvm_device *dev, u32 type);
> +void vgic_destroy(struct kvm_device *dev);
> +
> +int vgic_has_attr_regs(const struct mmio_range *ranges, phys_addr_t offset);
> +int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
> +int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
> +
> +bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type);
> --
> 1.7.9.5
>
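As a quick illustration of the ACCESS_* mode flags declared in this header — a minimal sketch only, not part of the patch, and assuming the irq_enabled bitmap that struct vgic_dist provides:

/* Illustration only: a set-enable style handler built on the helpers
 * above. Reads return the current value; writes set the written bits.
 * (A real handler would also report whether guest-visible state changed.) */
static bool example_handle_set_enable(struct kvm_vcpu *vcpu,
				      struct kvm_exit_mmio *mmio,
				      phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);

	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	return false;
}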
^ permalink raw reply [flat|nested] 25+ messages in thread
* [PATCH 10/14] arm/arm64: KVM: add opaque private pointer to MMIO accessors
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (8 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 09/14] arm/arm64: KVM: split GICv2 specific emulation code from vgic.c Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
2014-06-19 9:45 ` [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation Andre Przywara
` (4 subsequent siblings)
14 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
For a GICv2 there is always only one (v)CPU involved: the one that
does the access. On a GICv3 the access to a CPU redistributor is
memory-mapped, but not banked, so the (v)CPU affected is determined by
looking at the MMIO address region being accessed.
To allow passing the affected CPU into the accessors, extend them to
take an opaque private pointer parameter.
For the current GICv2 emulation we ignore it and simply pass NULL
in the call.
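In effect the handler prototype gains one parameter — sketched here for illustration:

/* before */
bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
		    phys_addr_t offset);

/* after: the opaque pointer carries per-call data (for GICv3 the
 * target vCPU of a redistributor access); GICv2 simply passes NULL */
bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
		    phys_addr_t offset, void *private);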
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic-v2-emul.c | 41 ++++++++++++++++++++++++-----------------
virt/kvm/arm/vgic.c | 16 +++++++++-------
virt/kvm/arm/vgic.h | 7 ++++---
3 files changed, 37 insertions(+), 27 deletions(-)
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index ba5f873..30d5c4c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -41,7 +41,8 @@ static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
}
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
u32 reg;
u32 word_offset = offset & 3;
@@ -77,7 +78,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
@@ -85,7 +86,7 @@ static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
@@ -93,7 +94,7 @@ static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
@@ -101,7 +102,7 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
@@ -109,7 +110,7 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
u32 *reg;
@@ -169,7 +170,7 @@ static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
u32 reg;
@@ -197,7 +198,8 @@ static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
}
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
u32 *reg;
@@ -208,7 +210,8 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
u32 reg;
vgic_reg_access(mmio, &reg, offset,
@@ -281,7 +284,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
if (!mmio->is_write)
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
@@ -291,7 +294,7 @@ static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
if (!mmio->is_write)
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
@@ -405,7 +408,8 @@ static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
- return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_dist_ranges, base, NULL);
}
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
@@ -537,7 +541,8 @@ bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type)
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
bool updated = false;
struct vgic_vmcr vmcr;
@@ -578,14 +583,16 @@ static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+ return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT,
+ private);
}
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
+ phys_addr_t offset, void *private)
{
u32 reg;
@@ -701,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
vgic_unqueue_irqs(tmp_vcpu);
offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
+ r->handle_mmio(vcpu, &mmio, offset, NULL);
if (!is_write)
*reg = mmio_data_read(&mmio, ~0);
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 0140505..4c1a129 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -327,7 +327,8 @@ void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
}
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
{
vgic_reg_access(mmio, NULL, offset,
ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
@@ -536,7 +537,7 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
*/
static bool call_range_handler(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
- unsigned long offset,
+ unsigned long offset, void *private,
const struct mmio_range *range)
{
u32 *data32 = (void *)mmio->data;
@@ -544,7 +545,7 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
bool ret;
if (likely(mmio->len <= 4))
- return range->handle_mmio(vcpu, mmio, offset);
+ return range->handle_mmio(vcpu, mmio, offset, private);
/*
* We assume that any access greater than 4 bytes is actually
@@ -557,14 +558,14 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
mmio32.phys_addr = mmio->phys_addr + 4;
if (mmio->is_write)
*(u32 *)mmio32.data = data32[1];
- ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
+ ret = range->handle_mmio(vcpu, &mmio32, offset + 4, private);
if (!mmio->is_write)
data32[1] = *(u32 *)mmio32.data;
mmio32.phys_addr = mmio->phys_addr;
if (mmio->is_write)
*(u32 *)mmio32.data = data32[0];
- ret |= range->handle_mmio(vcpu, &mmio32, offset);
+ ret |= range->handle_mmio(vcpu, &mmio32, offset, private);
if (!mmio->is_write)
data32[0] = *(u32 *)mmio32.data;
@@ -584,7 +585,7 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio,
const struct mmio_range *ranges,
- unsigned long mmio_base)
+ unsigned long mmio_base, void *private)
{
const struct mmio_range *range;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -602,7 +603,8 @@ bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
spin_lock(&vcpu->kvm->arch.vgic.lock);
offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = call_range_handler(vcpu, mmio, offset, range);
+ updated_state = call_range_handler(vcpu, mmio, offset, private,
+ range);
} else {
if (!mmio->is_write)
memset(mmio->data, 0, mmio->len);
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index e900aeb..9078e2a 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -57,7 +57,7 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
phys_addr_t offset, int mode);
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
+ phys_addr_t offset, void *private);
static inline
u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
@@ -76,7 +76,7 @@ struct mmio_range {
unsigned long len;
int bits_per_irq;
bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
+ phys_addr_t offset, void *private);
};
#define IS_IN_RANGE(addr, alen, base, len) \
@@ -90,7 +90,8 @@ struct mmio_range *vgic_find_matching_range(const struct mmio_range *ranges,
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio,
const struct mmio_range *ranges,
- unsigned long mmio_base);
+ unsigned long mmio_base,
+ void *private);
bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id, int access);
--
1.7.9.5
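The next patch puts the new parameter to use; for illustration, a redistributor handler can recover its target vCPU from the opaque pointer along these lines (a sketch of what the GICv3 emulation does):

static bool example_redist_set_enable(struct kvm_vcpu *vcpu,
				      struct kvm_exit_mmio *mmio,
				      phys_addr_t offset, void *private)
{
	/* the dispatcher passed the vCPU owning this redistributor frame */
	struct kvm_vcpu *target_redist_vcpu = private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      target_redist_vcpu->vcpu_id,
				      ACCESS_WRITE_SETBIT);
}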
^ permalink raw reply related [flat|nested] 25+ messages in thread
* [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (9 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 10/14] arm/arm64: KVM: add opaque private pointer to MMIO accessors Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
2014-06-19 21:39 ` Chalamarla, Tirumalesh
2014-06-19 9:45 ` [PATCH 12/14] arm/arm64: KVM: add SGI system register trapping Andre Przywara
` (3 subsequent siblings)
14 siblings, 1 reply; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
With everything separated and prepared, we implement a model of a
GICv3 distributor and redistributors by using the existing framework
to provide handler functions for each register group.
Currently we limit the emulation to a model enforcing a single
security state, with SRE==1 (forcing system register access) and
ARE==1 (allowing more than 8 VCPUs).
We share some of the functions provided for GICv2 emulation, but take
the different ways of addressing (v)CPUs into account.
Save and restore is currently not implemented.
Similar to the split-off GICv2 specific code, the new emulation code
goes into a new file (vgic-v3-emul.c).
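Since the redistributor frames are laid out contiguously, one per VCPU, the target vCPU falls out of the faulting address — a minimal sketch of the lookup the MMIO dispatcher below performs:

/* Sketch: map a faulting address inside the redistributor region to
 * the vCPU it belongs to, given one GIC_V3_REDIST_SIZE (0x20000)
 * frame per vCPU starting at rdbase. */
static int redist_vcpu_id(phys_addr_t addr, phys_addr_t rdbase)
{
	return (addr - rdbase) / GIC_V3_REDIST_SIZE;
}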
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm64/kvm/Makefile | 1 +
include/kvm/arm_vgic.h | 11 +-
include/linux/irqchip/arm-gic-v3.h | 26 ++
include/linux/kvm_host.h | 1 +
include/uapi/linux/kvm.h | 1 +
virt/kvm/arm/vgic-v3-emul.c | 895 ++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 11 +-
virt/kvm/arm/vgic.h | 3 +
virt/kvm/kvm_main.c | 3 +
9 files changed, 949 insertions(+), 3 deletions(-)
create mode 100644 virt/kvm/arm/vgic-v3-emul.c
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index f241db6..2fba3a5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -21,6 +21,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8aa8482..3b164ee 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -151,7 +151,11 @@ struct vgic_dist {
/* Distributor and vcpu interface mapping in the guest */
phys_addr_t vgic_dist_base;
- phys_addr_t vgic_cpu_base;
+ /* GICv2 and GICv3 use different mapped register blocks */
+ union {
+ phys_addr_t vgic_cpu_base;
+ phys_addr_t vgic_redist_base;
+ };
/* Distributor enabled */
u32 enabled;
@@ -176,6 +180,10 @@ struct vgic_dist {
/* Target CPU for each IRQ */
u8 *irq_spi_cpu;
+
+ /* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
+ u32 *irq_spi_mpidr;
+
struct vgic_bitmap *irq_spi_target;
/* Bitmap indicating which CPU has something pending */
@@ -253,6 +261,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9eac712..9f17e57 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -31,6 +31,7 @@
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
@@ -39,19 +40,38 @@
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+/*
+ * Non-ARE distributor registers, needed to provide the RES0
+ * semantics for KVM's emulated GICv3
+ */
+#define GICD_ITARGETSR 0x0800
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+
+
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_LPIS (1U << 17)
+#define GICD_TYPER_MBIS (1U << 16)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
#define GIC_PIDR2_ARCH_MASK 0xf0
+#define GIC_V3_DIST_SIZE 0x10000
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -70,6 +90,7 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_WAKER_ProcessorSleep (1U << 1)
@@ -78,6 +99,7 @@
/*
* Re-Distributor registers, offsets from SGI_base
*/
+#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
@@ -86,10 +108,14 @@
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_IGRPMODR0 GICD_IGRPMODR
+#define GICR_NSACR GICD_NSACR
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define GIC_V3_REDIST_SIZE 0x20000
+
/*
* CPU interface registers
*/
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ec4e3bd..7fe8203 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1089,6 +1089,7 @@ extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_flic_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e11d8f1..7eca98c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP_DEL 2
#define KVM_DEV_TYPE_ARM_VGIC_V2 5
#define KVM_DEV_TYPE_FLIC 6
+#define KVM_DEV_TYPE_ARM_VGIC_V3 7
/*
* ioctls for VM fds
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
new file mode 100644
index 0000000..68821fd
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -0,0 +1,895 @@
+/*
+ * GICv3 distributor and redistributor emulation on GICv3 hardware
+ *
+ * able to run on a pure native host GICv3 (which forces ARE=1)
+ *
+ * forcing ARE=1 and DS=1, not covering LPIs yet (TYPER.LPIS=0)
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define INTERRUPT_ID_BITS 10
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
+{
+ u32 reg = 0, val;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case GICD_CTLR:
+ /*
+ * Force ARE and DS to 1, the guest cannot change this.
+ * For the time being we only support Group1 interrupts.
+ */
+ if (vcpu->kvm->arch.vgic.enabled)
+ reg = GICD_CTLR_ENABLE_G1A;
+ reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_G1A);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+ case GICD_TYPER:
+ /*
+ * As this implementation does not provide compatibility
+ * with GICv2 (ARE==1), we report zero CPUs in the lower 5 bits.
+ * Also TYPER.LPIS is 0 for now and TYPER.MBIS is not supported.
+ */
+
+ /* claim we support at most 1024 (-4) SPIs via this interface */
+ val = min(vcpu->kvm->arch.vgic.nr_irqs, 1024);
+ reg |= (val >> 5) - 1;
+
+ reg |= (INTERRUPT_ID_BITS - 1) << 19;
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICD_IIDR:
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ default:
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static u32 compress_mpidr(unsigned long mpidr)
+{
+ u32 ret;
+
+ ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;
+
+ return ret;
+}
+
+static unsigned long uncompress_mpidr(u32 value)
+{
+ unsigned long mpidr;
+
+ mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
+
+ return mpidr;
+}
+
+/*
+ * Lookup the given MPIDR value to get the vcpu_id (if there is one)
+ * and store that in the irq_spi_cpu[] array.
+ * This limits the number of VCPUs to 255 for now; extending the data
+ * type (or storing kvm_vcpu pointers) would lift the limit.
+ * Store the original MPIDR value in an extra array.
+ * Unallocated MPIDRs are translated to a special value and caught
+ * before any array accesses.
+ */
+static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int irq;
+ u32 reg;
+ int vcpu_id;
+ unsigned long *bmap, mpidr;
+ u32 word_offset = offset & 3;
+
+ /*
+ * Private interrupts cannot be re-routed, so this register
+ * is RES0 for any IRQ < 32.
+ * Also the upper 32 bits of each 64 bit register are zero,
+ * as we don't support Aff3 and that's the only value up there.
+ */
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS * 8) || (offset & 4) == 4) {
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ irq = (offset / 8) - VGIC_NR_PRIVATE_IRQS;
+
+ /* get the stored MPIDR for this IRQ */
+ mpidr = uncompress_mpidr(dist->irq_spi_mpidr[irq]);
+ mpidr &= MPIDR_HWID_BITMASK;
+ reg = mpidr;
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+
+ if (!mmio->is_write)
+ return false;
+
+ /*
+ * Now clear the currently assigned vCPU from the map, making room
+ * for the new one to be written below
+ */
+ vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ clear_bit(irq, bmap);
+ }
+
+ dist->irq_spi_mpidr[irq] = compress_mpidr(reg);
+ vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);
+
+ /*
+ * The spec says that non-existent MPIDR values should not be
+ * forwarded to any existing (v)CPU, but should be able to become
+ * pending anyway. We simply keep the irq_spi_target[] array empty, so
+ * the interrupt will never be injected.
+ * irq_spi_cpu[irq] gets a magic value in this case.
+ */
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ dist->irq_spi_cpu[irq] = vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ set_bit(irq, bmap);
+ } else
+ dist->irq_spi_cpu[irq] = VCPU_NOT_ALLOCATED;
+
+ vgic_update_state(kvm);
+
+ return true;
+}
+
+static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ u32 reg = 0;
+
+ switch (offset + GICD_IDREGS) {
+ case GICD_PIDR2:
+ reg = 0x3b;
+ break;
+ }
+
+ vgic_reg_access(mmio, &reg, offset & 3,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static const struct mmio_range vgic_dist_ranges[] = {
+ { /*
+ * handling CTLR, TYPER, IIDR and STATUSR
+ */
+ .base = GICD_CTLR,
+ .len = 20,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ /* when DS=1, this is RAZ/WI */
+ .base = GICD_SETSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* when DS=1, this is RAZ/WI */
+ .base = GICD_CLRSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IGROUPR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ISENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_dist,
+ },
+ {
+ .base = GICD_ICENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_dist,
+ },
+ {
+ .base = GICD_ISPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_dist,
+ },
+ {
+ .base = GICD_ICPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_dist,
+ },
+ {
+ .base = GICD_ISACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IPRIORITYR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_dist,
+ },
+ {
+ /* TARGETSRn is RES0 when ARE=1 */
+ .base = GICD_ITARGETSR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICFGR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_dist,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_IGRPMODR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* with DS==1 this is RAZ/WI */
+ .base = GICD_NSACR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ /* the next three blocks are RES0 if ARE=1 */
+ {
+ .base = GICD_SGIR,
+ .len = 4,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_CPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_SPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IROUTER,
+ .len = 0x2000,
+ .bits_per_irq = 64,
+ .handle_mmio = handle_mmio_route_reg,
+ },
+ {
+ .base = GICD_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ target_redist_vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ *(int *)private, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static const struct mmio_range vgic_redist_sgi_ranges[] = {
+ {
+ .base = GICR_IGROUPR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ISENABLER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_redist,
+ },
+ {
+ .base = GICR_ICENABLER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_redist,
+ },
+ {
+ .base = GICR_ISPENDR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_redist,
+ },
+ {
+ .base = GICR_ICPENDR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_redist,
+ },
+ {
+ .base = GICR_ISACTIVER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ICACTIVER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IPRIORITYR0,
+ .len = 32,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_redist,
+ },
+ {
+ .base = GICR_ICFGR0,
+ .len = 8,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_redist,
+ },
+ {
+ .base = GICR_IGRPMODR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_NSACR,
+ .len = 4,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {},
+};
+
+static bool handle_mmio_misc_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+ u64 mpidr;
+ struct kvm_vcpu *target_redist_vcpu = private;
+ int target_vcpu_id = target_redist_vcpu->vcpu_id;
+
+ switch (offset & ~3) {
+ case GICR_CTLR:
+ /* since we don't support LPIs, this register is zero for now */
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_TYPER + 4:
+ mpidr = kvm_vcpu_get_mpidr(target_redist_vcpu);
+ reg = compress_mpidr(mpidr);
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_TYPER:
+ reg = target_redist_vcpu->vcpu_id << 8;
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ reg |= GICR_TYPER_LAST;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_IIDR:
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ default:
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static const struct mmio_range vgic_redist_ranges[] = {
+ { /*
+ * handling CTLR, IIDR, TYPER and STATUSR
+ */
+ .base = GICR_CTLR,
+ .len = 20,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc_redist,
+ },
+ {
+ .base = GICR_WAKER,
+ .len = 4,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+/*
+ * This is the stub handling both distributor and redistributor MMIO exits
+ * for GICv3. It derives the target vcpu_id from the redistributor MMIO
+ * address, so the access may affect a different VCPU than the current one.
+ */
+static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long dbase = dist->vgic_dist_base;
+ unsigned long rdbase = dist->vgic_redist_base;
+ int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ int vcpu_id;
+ struct kvm_vcpu *target_redist_vcpu;
+
+ if (IS_IN_RANGE(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_dist_ranges, dbase, NULL);
+ }
+
+ if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, rdbase,
+ GIC_V3_REDIST_SIZE * nrcpus))
+ return false;
+
+ vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
+ rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
+ target_redist_vcpu = kvm_get_vcpu(vcpu->kvm, vcpu_id);
+
+ if (mmio->phys_addr >= rdbase + 0x10000)
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_redist_sgi_ranges,
+ rdbase + 0x10000,
+ target_redist_vcpu);
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_redist_ranges,
+ rdbase, target_redist_vcpu);
+}
+
+static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vgic_queue_irq(vcpu, 0, irq)) {
+ vgic_dist_irq_clear(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v3_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
+{
+ int nr_spis = nr_irqs - VGIC_NR_PRIVATE_IRQS;
+
+ dist->irq_spi_mpidr = kzalloc(nr_spis * sizeof(dist->irq_spi_mpidr[0]),
+ GFP_KERNEL);
+
+ if (!dist->irq_spi_mpidr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int vgic_v3_init(struct kvm *kvm, const struct vgic_params *params)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret, i;
+ u32 mpidr;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ return -ENXIO;
+ }
+
+ dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+
+ ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (!ret)
+ ret = vgic_v3_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ return ret;
+ }
+
+ mpidr = compress_mpidr(kvm_vcpu_get_mpidr(kvm_get_vcpu(kvm, 0)));
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
+ dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
+ dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
+ vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
+ }
+
+ return 0;
+}
+
+static void vgic_v3_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ return;
+}
+
+bool vgic_v3_init_emulation_ops(struct kvm *kvm, int type)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
+ dist->vm_ops.unqueue_sgi = vgic_v3_unqueue_sgi;
+ dist->vm_ops.vgic_init = vgic_v3_init;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Triggered by a system register access trap; called from the sysreg
+ * handling code.
+ * The register contains the upper three affinity levels of the target
+ * processors as well as a bitmask of 16 Aff0 CPUs.
+ * Iterate over all VCPUs to check for matching ones or signal on
+ * all-but-self if the mode bit is set.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *c_vcpu;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u16 target_cpus;
+ u64 mpidr, mpidr_h, mpidr_l;
+ int sgi, mode, c, vcpu_id;
+ int updated = 0;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = (reg >> 24) & 0xf;
+ mode = (reg >> 40) & 0x1;
+ target_cpus = reg & 0xffff;
+ mpidr = ((reg >> 48) & 0xff) << MPIDR_LEVEL_SHIFT(3);
+ mpidr |= ((reg >> 32) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= ((reg >> 16) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+ mpidr &= ~MPIDR_LEVEL_MASK;
+
+ /*
+ * We take the dist lock here, because we come from the sysregs
+ * code path and not from MMIO (where this is already done)
+ */
+ spin_lock(&dist->lock);
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+ if (target_cpus == 0)
+ break;
+ if (mode && c == vcpu_id) /* not to myself */
+ continue;
+ if (!mode) {
+ mpidr_h = kvm_vcpu_get_mpidr(c_vcpu);
+ mpidr_l = MPIDR_AFFINITY_LEVEL(mpidr_h, 0);
+ mpidr_h &= ~MPIDR_LEVEL_MASK;
+ if (mpidr != mpidr_h)
+ continue;
+ if (!(target_cpus & BIT(mpidr_l)))
+ continue;
+ target_cpus &= ~BIT(mpidr_l);
+ }
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set(c_vcpu, sgi);
+ updated = 1;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+ }
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+ spin_unlock(&dist->lock);
+ if (updated)
+ vgic_kick_vcpus(vcpu->kvm);
+}
+
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int r;
+
+ r = vgic_get_common_attr(dev, attr);
+ if (!r)
+ return r;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return r;
+}
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+
+ if (!ret)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return -ENXIO;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
+ return 0;
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+ .name = "kvm-arm-vgic-v3",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v3_set_attr,
+ .get_attr = vgic_v3_get_attr,
+ .has_attr = vgic_v3_has_attr,
+};
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 4c1a129..fa150c7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1071,7 +1071,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
struct kvm_vcpu *vcpu;
int is_edge, is_level;
int enabled;
- bool ret = true;
+ bool ret = true, can_inject = true;
spin_lock(&dist->lock);
@@ -1086,6 +1086,11 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
+ if (cpuid == VCPU_NOT_ALLOCATED) {
+ /* Pretend we use CPU0, and prevent injection */
+ cpuid = 0;
+ can_inject = false;
+ }
vcpu = kvm_get_vcpu(kvm, cpuid);
}
@@ -1098,7 +1103,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
enabled = vgic_irq_is_enabled(vcpu, irq_num);
- if (!enabled) {
+ if (!enabled || !can_inject) {
ret = false;
goto out;
}
@@ -1314,6 +1319,7 @@ static void vgic_free_maps(struct vgic_dist *dist)
vgic_free_bitmap(&dist->irq_spi_target[i]);
kfree(dist->irq_sgi_sources);
kfree(dist->irq_spi_cpu);
+ kfree(dist->irq_spi_mpidr);
kfree(dist->irq_spi_target);
}
@@ -1463,6 +1469,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
init_emulation_ops(kvm, type);
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 9078e2a..f24cdd0 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -34,6 +34,8 @@
#define VCPU_NOT_ALLOCATED ((u8)-1)
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
void vgic_update_state(struct kvm *kvm);
@@ -112,3 +114,4 @@ int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type);
+bool vgic_v3_init_emulation_ops(struct kvm *kvm, int type);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b6c01b..c2fac5d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2286,6 +2286,9 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
case KVM_DEV_TYPE_ARM_VGIC_V2:
ops = &kvm_arm_vgic_v2_ops;
break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ ops = &kvm_arm_vgic_v3_ops;
+ break;
#endif
#ifdef CONFIG_S390
case KVM_DEV_TYPE_FLIC:
--
1.7.9.5
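For reference, the SGI register value handed to vgic_v3_dispatch_sgi() packs its fields as below — a decoding sketch mirroring the shifts used in that function:

/* Decode sketch: the fields vgic_v3_dispatch_sgi() extracts from the
 * trapped SGI register value (same layout as in the function above). */
static void decode_sgi_reg(u64 reg)
{
	u16 target_list = reg & 0xffff;		/* bitmask of Aff0 targets */
	u8  aff1 = (reg >> 16) & 0xff;		/* affinity level 1 */
	u8  sgi  = (reg >> 24) & 0xf;		/* SGI number (0..15) */
	u8  aff2 = (reg >> 32) & 0xff;		/* affinity level 2 */
	u8  mode = (reg >> 40) & 0x1;		/* 1: send to all but self */
	u8  aff3 = (reg >> 48) & 0xff;		/* affinity level 3 */
}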
^ permalink raw reply related [flat|nested] 25+ messages in thread
* [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation
2014-06-19 9:45 ` [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation Andre Przywara
@ 2014-06-19 21:39 ` Chalamarla, Tirumalesh
0 siblings, 0 replies; 25+ messages in thread
From: Chalamarla, Tirumalesh @ 2014-06-19 21:39 UTC (permalink / raw)
To: linux-arm-kernel
Again, what is the need for vgic-v3-emul.c when we have vgic-v3.c?
-----Original Message-----
From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
Sent: Thursday, June 19, 2014 2:46 AM
To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
Cc: christoffer.dall at linaro.org
Subject: [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation
With everything separated and prepared, we implement a model of a
GICv3 distributor and redistributors by using the existing framework to provide handler functions for each register group.
Currently we limit the emulation to a model enforcing a single security state, with SRE==1 (forcing system register access) and
ARE==1 (allowing more than 8 VCPUs).
We share some of functions provided for GICv2 emulation, but take the different ways of addressing (v)CPUs into account.
Save and restore is currently not implemented.
Similar to the split-off GICv2 specific code, the new emulation code goes into a new file (vgic-v3-emul.c).
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm64/kvm/Makefile | 1 +
include/kvm/arm_vgic.h | 11 +-
include/linux/irqchip/arm-gic-v3.h | 26 ++
include/linux/kvm_host.h | 1 +
include/uapi/linux/kvm.h | 1 +
virt/kvm/arm/vgic-v3-emul.c | 895 ++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 11 +-
virt/kvm/arm/vgic.h | 3 +
virt/kvm/kvm_main.c | 3 +
9 files changed, 949 insertions(+), 3 deletions(-) create mode 100644 virt/kvm/arm/vgic-v3-emul.c
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index f241db6..2fba3a5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -21,6 +21,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 8aa8482..3b164ee 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -151,7 +151,11 @@ struct vgic_dist {
/* Distributor and vcpu interface mapping in the guest */
phys_addr_t vgic_dist_base;
- phys_addr_t vgic_cpu_base;
+ /* GICv2 and GICv3 use different mapped register blocks */
+ union {
+ phys_addr_t vgic_cpu_base;
+ phys_addr_t vgic_redist_base;
+ };
/* Distributor enabled */
u32 enabled;
@@ -176,6 +180,10 @@ struct vgic_dist {
/* Target CPU for each IRQ */
u8 *irq_spi_cpu;
+
+ /* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
+ u32 *irq_spi_mpidr;
+
struct vgic_bitmap *irq_spi_target;
/* Bitmap indicating which CPU has something pending */ @@ -253,6 +261,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9eac712..9f17e57 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -31,6 +31,7 @@
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
@@ -39,19 +40,38 @@
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+/*
+ * Non-ARE distributor registers, needed to provide the RES0
+ * semantics for KVM's emulated GICv3
+ */
+#define GICD_ITARGETSR 0x0800
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+
+
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_LPIS (1U << 17)
+#define GICD_TYPER_MBIS (1U << 16)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
#define GIC_PIDR2_ARCH_MASK 0xf0
+#define GIC_V3_DIST_SIZE 0x10000
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -70,6 +90,7 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_WAKER_ProcessorSleep (1U << 1)
@@ -78,6 +99,7 @@
/*
* Re-Distributor registers, offsets from SGI_base
*/
+#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
@@ -86,10 +108,14 @@
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_IGRPMODR0 GICD_IGRPMODR
+#define GICR_NSACR GICD_NSACR
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define GIC_V3_REDIST_SIZE 0x20000
+
/*
* CPU interface registers
*/
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ec4e3bd..7fe8203 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1089,6 +1089,7 @@ extern struct kvm_device_ops kvm_xics_ops; extern struct kvm_device_ops kvm_vfio_ops; extern struct kvm_device_ops kvm_arm_vgic_v2_ops; extern struct kvm_device_ops kvm_flic_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e11d8f1..7eca98c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP_DEL 2
#define KVM_DEV_TYPE_ARM_VGIC_V2 5
#define KVM_DEV_TYPE_FLIC 6
+#define KVM_DEV_TYPE_ARM_VGIC_V3 7
/*
* ioctls for VM fds
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c new file mode 100644 index 0000000..68821fd
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -0,0 +1,895 @@
+/*
+ * GICv3 distributor and redistributor emulation on GICv3 hardware
+ *
+ * able to run on a pure native host GICv3 (which forces ARE=1)
+ *
+ * forcing ARE=1 and DS=1, not covering LPIs yet (TYPER.LPIS=0)
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+#define INTERRUPT_ID_BITS 10
+
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio, phys_addr_t offset,
+ void *private)
+{
+ u32 reg = 0, val;
+ u32 word_offset = offset & 3;
+
+ switch (offset & ~3) {
+ case GICD_CTLR:
+ /*
+ * Force ARE and DS to 1, the guest cannot change this.
+ * For the time being we only support Group1 interrupts.
+ */
+ if (vcpu->kvm->arch.vgic.enabled)
+ reg = GICD_CTLR_ENABLE_G1A;
+ reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+
+ vgic_reg_access(mmio, ®, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ if (mmio->is_write) {
+ vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_G1A);
+ vgic_update_state(vcpu->kvm);
+ return true;
+ }
+ break;
+ case GICD_TYPER:
+ /*
+ * as this implementation does not provide compatibility
+ * with GICv2 (ARE==1), we report zero CPUs in the lower 5 bits.
+ * Also TYPER.LPIS is 0 for now and TYPER.MBIS is not supported.
+ */
+
+ /* claim we support at most 1024 (-4) SPIs via this interface */
+ val = min(vcpu->kvm->arch.vgic.nr_irqs, 1024);
+ reg |= (val >> 5) - 1;
+
+ reg |= (INTERRUPT_ID_BITS - 1) << 19;
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICD_IIDR:
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ default:
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg;
+
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
+ vgic_reg_access(mmio, NULL, offset & 3,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static u32 compress_mpidr(unsigned long mpidr)
+{
+ u32 ret;
+
+ ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
+ ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;
+
+ return ret;
+}
+
+static unsigned long uncompress_mpidr(u32 value)
+{
+ unsigned long mpidr;
+
+ mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
+
+ return mpidr;
+}
+
+/*
+ * Lookup the given MPIDR value to get the vcpu_id (if there is one)
+ * and store that in the irq_spi_cpu[] array.
+ * This limits the number of VCPUs to 255 for now; extending the data
+ * type (or storing kvm_vcpu pointers) should lift the limit.
+ * Store the original MPIDR value in an extra array.
+ * Unallocated MPIDRs are translated to a special value and caught
+ * before any array accesses.
+ */
+static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int irq;
+ u32 reg;
+ int vcpu_id;
+ unsigned long *bmap, mpidr;
+ u32 word_offset = offset & 3;
+
+ /*
+ * Private interrupts cannot be re-routed, so this register
+ * is RES0 for any IRQ < 32.
+ * Also the upper 32 bits of each 64 bit register are zero,
+ * as we don't support Aff3 and that's the only value up there.
+ */
+ if (unlikely(offset < VGIC_NR_PRIVATE_IRQS * 8) || (offset & 4) == 4) {
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ irq = (offset / 8) - VGIC_NR_PRIVATE_IRQS;
+
+ /* get the stored MPIDR for this IRQ */
+ mpidr = uncompress_mpidr(dist->irq_spi_mpidr[irq]);
+ mpidr &= MPIDR_HWID_BITMASK;
+ reg = mpidr;
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+
+ if (!mmio->is_write)
+ return false;
+
+ /*
+ * Now clear the currently assigned vCPU from the map, making room
+ * for the new one to be written below
+ */
+ vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ clear_bit(irq, bmap);
+ }
+
+ dist->irq_spi_mpidr[irq] = compress_mpidr(reg);
+ vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);
+
+ /*
+ * The spec says that non-existent MPIDR values should not be
+ * forwarded to any existing (v)CPU, but should be able to become
+ * pending anyway. We simply keep the irq_spi_target[] array empty, so
+ * the interrupt will never be injected.
+ * irq_spi_cpu[irq] gets a magic value in this case.
+ */
+ if (likely(vcpu)) {
+ vcpu_id = vcpu->vcpu_id;
+ dist->irq_spi_cpu[irq] = vcpu_id;
+ bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
+ set_bit(irq, bmap);
+ } else
+ dist->irq_spi_cpu[irq] = VCPU_NOT_ALLOCATED;
+
+ vgic_update_state(kvm);
+
+ return true;
+}
+
+static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ u32 reg = 0;
+
+ switch (offset + GICD_IDREGS) {
+ case GICD_PIDR2:
+ reg = 0x3b;
+ break;
+ }
+
+ vgic_reg_access(mmio, &reg, offset & 3,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+
+ return false;
+}
+
+static const struct mmio_range vgic_dist_ranges[] = {
+ { /*
+ * handling CTLR, TYPER, IIDR and STATUSR
+ */
+ .base = GICD_CTLR,
+ .len = 20,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc,
+ },
+ {
+ /* when DS=1, this is RAZ/WI */
+ .base = GICD_SETSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* when DS=1, this is RAZ/WI */
+ .base = GICD_CLRSPI_SR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IGROUPR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ISENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_dist,
+ },
+ {
+ .base = GICD_ICENABLER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_dist,
+ },
+ {
+ .base = GICD_ISPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_dist,
+ },
+ {
+ .base = GICD_ICPENDR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_dist,
+ },
+ {
+ .base = GICD_ISACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICACTIVER,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IPRIORITYR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_dist,
+ },
+ {
+ /* TARGETSRn is RES0 when ARE=1 */
+ .base = GICD_ITARGETSR,
+ .len = 0x400,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_ICFGR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_dist,
+ },
+ {
+ /* this is RAZ/WI when DS=1 */
+ .base = GICD_IGRPMODR,
+ .len = 0x80,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ /* with DS==1 this is RAZ/WI */
+ .base = GICD_NSACR,
+ .len = 0x100,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ /* the next three blocks are RES0 if ARE=1 */
+ {
+ .base = GICD_SGIR,
+ .len = 4,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_CPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_SPENDSGIR,
+ .len = 0x10,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICD_IROUTER,
+ .len = 0x2000,
+ .bits_per_irq = 64,
+ .handle_mmio = handle_mmio_route_reg,
+ },
+ {
+ .base = GICD_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_SETBIT);
+}
+
+static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+
+ return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+ target_redist_vcpu->vcpu_id,
+ ACCESS_WRITE_CLEARBIT);
+}
+
+static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ struct kvm_vcpu *target_redist_vcpu = private;
+ u32 *reg;
+
+ reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+ target_redist_vcpu->vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ return false;
+}
+
+static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset,
+ void *private)
+{
+ u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ *(int *)private, offset >> 1);
+
+ return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
+static const struct mmio_range vgic_redist_sgi_ranges[] = {
+ {
+ .base = GICR_IGROUPR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ISENABLER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_enable_reg_redist,
+ },
+ {
+ .base = GICR_ICENABLER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_enable_reg_redist,
+ },
+ {
+ .base = GICR_ISPENDR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_set_pending_reg_redist,
+ },
+ {
+ .base = GICR_ICPENDR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_clear_pending_reg_redist,
+ },
+ {
+ .base = GICR_ISACTIVER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_ICACTIVER0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IPRIORITYR0,
+ .len = 32,
+ .bits_per_irq = 8,
+ .handle_mmio = handle_mmio_priority_reg_redist,
+ },
+ {
+ .base = GICR_ICFGR0,
+ .len = 8,
+ .bits_per_irq = 2,
+ .handle_mmio = handle_mmio_cfg_reg_redist,
+ },
+ {
+ .base = GICR_IGRPMODR0,
+ .len = 4,
+ .bits_per_irq = 1,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_NSACR,
+ .len = 4,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {},
+};
+
+static bool handle_mmio_misc_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, void *private)
+{
+ u32 reg;
+ u32 word_offset = offset & 3;
+ u64 mpidr;
+ struct kvm_vcpu *target_redist_vcpu = private;
+ int target_vcpu_id = target_redist_vcpu->vcpu_id;
+
+ switch (offset & ~3) {
+ case GICR_CTLR:
+ /* since we don't support LPIs, this register is zero for now */
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_TYPER + 4:
+ mpidr = kvm_vcpu_get_mpidr(target_redist_vcpu);
+ reg = compress_mpidr(mpidr);
+
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_TYPER:
+ reg = target_redist_vcpu->vcpu_id << 8;
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ reg |= GICR_TYPER_LAST;
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ case GICR_IIDR:
+ reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ vgic_reg_access(mmio, &reg, word_offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ break;
+ default:
+ vgic_reg_access(mmio, NULL, word_offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ break;
+ }
+
+ return false;
+}
+
+static const struct mmio_range vgic_redist_ranges[] = {
+ { /*
+ * handling CTLR, IIDR, TYPER and STATUSR
+ */
+ .base = GICR_CTLR,
+ .len = 20,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_misc_redist,
+ },
+ {
+ .base = GICR_WAKER,
+ .len = 4,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
+ {
+ .base = GICR_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {},
+};
+
+/*
+ * this is the stub handling both dist and redist MMIO exits for v3
+ * does some vcpu_id calculation on the redist MMIO to use a possibly
+ * different VCPU than the current one
+ */
+static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long dbase = dist->vgic_dist_base;
+ unsigned long rdbase = dist->vgic_redist_base;
+ int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ int vcpu_id;
+ struct kvm_vcpu *target_redist_vcpu;
+
+ if (IS_IN_RANGE(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_dist_ranges, dbase, NULL);
+ }
+
+ if (!IS_IN_RANGE(mmio->phys_addr, mmio->len, rdbase,
+ GIC_V3_REDIST_SIZE * nrcpus))
+ return false;
+
+ vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
+ rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
+ target_redist_vcpu = kvm_get_vcpu(vcpu->kvm, vcpu_id);
+
+ if (mmio->phys_addr >= rdbase + 0x10000)
+ return vgic_handle_mmio_range(vcpu, run, mmio,
+ vgic_redist_sgi_ranges,
+ rdbase + 0x10000,
+ target_redist_vcpu);
+
+ return vgic_handle_mmio_range(vcpu, run, mmio, vgic_redist_ranges,
+ rdbase, target_redist_vcpu);
+}
+
+static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vgic_queue_irq(vcpu, 0, irq)) {
+ vgic_dist_irq_clear(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, irq);
+ return true;
+ }
+
+ return false;
+}
+
+static int vgic_v3_init_maps(struct vgic_dist *dist, int nr_cpus, int nr_irqs)
+{
+ int nr_spis = nr_irqs - VGIC_NR_PRIVATE_IRQS;
+
+ dist->irq_spi_mpidr = kzalloc(nr_spis * sizeof(dist->irq_spi_mpidr[0]),
+ GFP_KERNEL);
+
+ if (!dist->irq_spi_mpidr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int vgic_v3_init(struct kvm *kvm, const struct vgic_params *params)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret, i;
+ u32 mpidr;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+ kvm_err("Need to set vgic distributor addresses first\n");
+ return -ENXIO;
+ }
+
+ dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+
+ ret = vgic_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (!ret)
+ ret = vgic_v3_init_maps(dist, dist->nr_cpus, dist->nr_irqs);
+ if (ret) {
+ kvm_err("Unable to allocate maps\n");
+ return ret;
+ }
+
+ mpidr = compress_mpidr(kvm_vcpu_get_mpidr(kvm_get_vcpu(kvm, 0)));
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
+ dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
+ dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
+ vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
+ }
+
+ return 0;
+}
+
+static void vgic_v3_unqueue_sgi(struct kvm_vcpu *vcpu, int irq, int source)
+{
+ return;
+}
+
+bool vgic_v3_init_emulation_ops(struct kvm *kvm, int type)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
+ dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
+ dist->vm_ops.unqueue_sgi = vgic_v3_unqueue_sgi;
+ dist->vm_ops.vgic_init = vgic_v3_init;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Triggered by a system register access trap, called from the sysreg
+ * handling code.
+ * The register contains the upper three affinity levels of the target
+ * processors as well as a bitmask of 16 Aff0 CPUs.
+ * Iterate over all VCPUs to check for matching ones or signal on
+ * all-but-self if the mode bit is set.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *c_vcpu;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u16 target_cpus;
+ u64 mpidr, mpidr_h, mpidr_l;
+ int sgi, mode, c, vcpu_id;
+ int updated = 0;
+
+ vcpu_id = vcpu->vcpu_id;
+
+ sgi = (reg >> 24) & 0xf;
+ mode = (reg >> 40) & 0x1;
+ target_cpus = reg & 0xffff;
+ mpidr = ((reg >> 48) & 0xff) << MPIDR_LEVEL_SHIFT(3);
+ mpidr |= ((reg >> 32) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+ mpidr |= ((reg >> 16) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+ mpidr &= ~MPIDR_LEVEL_MASK;
+
+ /*
+ * We take the dist lock here, because we come from the sysregs
+ * code path and not from MMIO (where this is already done)
+ */
+ spin_lock(&dist->lock);
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+ if (target_cpus == 0)
+ break;
+ if (mode && c == vcpu_id) /* not to myself */
+ continue;
+ if (!mode) {
+ mpidr_h = kvm_vcpu_get_mpidr(c_vcpu);
+ mpidr_l = MPIDR_AFFINITY_LEVEL(mpidr_h, 0);
+ mpidr_h &= ~MPIDR_LEVEL_MASK;
+ if (mpidr != mpidr_h)
+ continue;
+ if (!(target_cpus & BIT(mpidr_l)))
+ continue;
+ target_cpus &= ~BIT(mpidr_l);
+ }
+ /* Flag the SGI as pending */
+ vgic_dist_irq_set(c_vcpu, sgi);
+ updated = 1;
+ kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+ }
+ if (updated)
+ vgic_update_state(vcpu->kvm);
+ spin_unlock(&dist->lock);
+ if (updated)
+ vgic_kick_vcpus(vcpu->kvm);
+}
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int r;
+
+ r = vgic_get_common_attr(dev, attr);
+ if (!r)
+ return r;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ }
+
+ return r;
+}
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+
+ if (!ret)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return -ENXIO;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return -ENXIO;
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
+ return 0;
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+ .name = "kvm-arm-vgic-v3",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v3_set_attr,
+ .get_attr = vgic_v3_get_attr,
+ .has_attr = vgic_v3_has_attr,
+};
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 4c1a129..fa150c7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1071,7 +1071,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
struct kvm_vcpu *vcpu;
int is_edge, is_level;
int enabled;
- bool ret = true;
+ bool ret = true, can_inject = true;
spin_lock(&dist->lock);
@@ -1086,6 +1086,11 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
+ if (cpuid == VCPU_NOT_ALLOCATED) {
+ /* Pretend we use CPU0, and prevent injection */
+ cpuid = 0;
+ can_inject = false;
+ }
vcpu = kvm_get_vcpu(kvm, cpuid);
}
@@ -1098,7 +1103,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
enabled = vgic_irq_is_enabled(vcpu, irq_num);
- if (!enabled) {
+ if (!enabled || !can_inject) {
ret = false;
goto out;
}
@@ -1314,6 +1319,7 @@ static void vgic_free_maps(struct vgic_dist *dist)
vgic_free_bitmap(&dist->irq_spi_target[i]);
kfree(dist->irq_sgi_sources);
kfree(dist->irq_spi_cpu);
+ kfree(dist->irq_spi_mpidr);
kfree(dist->irq_spi_target);
}
@@ -1463,6 +1469,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
init_emulation_ops(kvm, type);
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 9078e2a..f24cdd0 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -34,6 +34,8 @@
+#define VCPU_NOT_ALLOCATED ((u8)-1)
+
unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
void vgic_update_state(struct kvm *kvm); @@ -112,3 +114,4 @@ int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
bool vgic_v2_init_emulation_ops(struct kvm *kvm, int type);
+bool vgic_v3_init_emulation_ops(struct kvm *kvm, int type);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b6c01b..c2fac5d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2286,6 +2286,9 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
case KVM_DEV_TYPE_ARM_VGIC_V2:
ops = &kvm_arm_vgic_v2_ops;
break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ ops = &kvm_arm_vgic_v3_ops;
+ break;
#endif
#ifdef CONFIG_S390
case KVM_DEV_TYPE_FLIC:
--
1.7.9.5
_______________________________________________
kvmarm mailing list
kvmarm at lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply related [flat|nested] 25+ messages in thread
* [PATCH 12/14] arm/arm64: KVM: add SGI system register trapping
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (10 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 11/14] arm/arm64: KVM: add virtual GICv3 distributor emulation Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
2014-06-19 9:45 ` [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation Andre Przywara
` (2 subsequent siblings)
14 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
While the injection of a (virtual) inter-processor interrupt (SGI)
on a GICv2 works by writing to an MMIO register, GICv3 uses system
registers to trigger it.
Trap the appropriate registers both on ARM and ARM64 machines and
call the SGI handler function in the vGICv3 emulation code.
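For reference, the ICC_SGI1R_EL1 layout that these handlers forward is
decoded as in the sketch below. This is an illustrative, standalone
snippet (the helper name is made up), mirroring the shifts used by
vgic_v3_dispatch_sgi() in the emulation code; it is not code from this
patch:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustration only: decode an ICC_SGI1R_EL1 value the way the
     * vGICv3 dispatch code does (field positions per the GICv3
     * architecture specification).
     */
    static void decode_icc_sgi1r(uint64_t reg)
    {
        unsigned int target_list = reg & 0xffff; /* bitmask of Aff0 CPUs */
        unsigned int aff1 = (reg >> 16) & 0xff;
        unsigned int sgi = (reg >> 24) & 0xf;    /* SGI number (0..15) */
        unsigned int aff2 = (reg >> 32) & 0xff;
        unsigned int irm = (reg >> 40) & 0x1;    /* 1 = all-but-self */
        unsigned int aff3 = (reg >> 48) & 0xff;

        printf("SGI%u irm=%u Aff3.2.1=%u.%u.%u targets=0x%04x\n",
               sgi, irm, aff3, aff2, aff1, target_list);
    }

    int main(void)
    {
        /* SGI 1 to the CPUs with Aff0 = 0 and 1 in cluster 0.0.0 */
        decode_icc_sgi1r(0x0000000001000003ULL);
        return 0;
    }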
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm/kvm/coproc.c | 19 +++++++++++++++++++
arch/arm64/kvm/sys_regs.c | 26 ++++++++++++++++++++++++++
2 files changed, 45 insertions(+)
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index c58a351..4adadb7 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -205,6 +205,22 @@ done:
return true;
}
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ u64 val;
+
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ val = *vcpu_reg(vcpu, p->Rt1);
+ val |= (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+ vgic_v3_dispatch_sgi(vcpu, val);
+
+ return true;
+}
+
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
* is set.
@@ -376,6 +392,9 @@ static const struct coproc_reg cp15_regs[] = {
{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
access_vm_reg, reset_unknown, c10_AMAIR1},
+ /* ICC_SGI1R */
+ { CRm64(12), Op1( 0), is64, access_gic_sgi},
+
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c12_VBAR, 0x00000000 },
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index fa2273a..012f21a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -164,6 +164,27 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
}
/*
+ * Trapping on the GICv3 SGI system register.
+ * Forward the request to the VGIC emulation.
+ * The cp15_64 code makes sure this automatically works
+ * for both AArch64 and AArch32 accesses.
+ */
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 val;
+
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ val = *vcpu_reg(vcpu, p->Rt);
+ vgic_v3_dispatch_sgi(vcpu, val);
+
+ return true;
+}
+
+/*
* We could trap ID_DFR0 and tell the guest we don't support performance
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
* NAKed, so it will read the PMCR anyway.
@@ -282,6 +303,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* VBAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
+ /* ICC_SGI1R_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
+ access_gic_sgi },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -374,6 +398,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
+
/*
* DC{C,I,CI}SW operations:
*/
--
1.7.9.5
^ permalink raw reply related [flat|nested] 25+ messages in thread
* [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (11 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 12/14] arm/arm64: KVM: add SGI system register trapping Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
2014-06-19 21:43 ` Chalamarla, Tirumalesh
2014-06-19 9:45 ` [PATCH 14/14] arm/arm64: KVM: allow userland to request a virtual GICv3 Andre Przywara
[not found] ` <1403228344858.49974@caviumnetworks.com>
14 siblings, 1 reply; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
With all the necessary GICv3 emulation code in place, we can now
connect the code to the GICv3 backend in the kernel.
The LR register handling is different depending on the emulated GIC
model, so provide different implementations for each.
Also allow non-v2-compatible GICv3 implementations (which don't
provide MMIO regions for the virtual CPU interface in the DT), but
restrict those hosts to use GICv3 guests only.
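To make the LR difference concrete, the sketch below shows the two
encodings the new set_lr implementations produce. This is an
illustration only (the EX_* names are made up; bit positions follow
arm-gic-v3.h and the GICH_LR_PHYSID_CPUID_SHIFT value used in the
patch), not code from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_ICH_LR_GROUP (1ULL << 60)    /* as ICH_LR_GROUP */
    #define EX_ICH_LR_PENDING (1ULL << 62)  /* as ICH_LR_PENDING_BIT */
    #define EX_LR_CPUID_SHIFT 10            /* GICH_LR_PHYSID_CPUID_SHIFT */

    /* native GICv3 guest: no source CPU in the LR, everything is Group1 */
    static uint64_t example_lr_v3(uint32_t irq)
    {
        return (uint64_t)irq | EX_ICH_LR_GROUP | EX_ICH_LR_PENDING;
    }

    /* GICv2 guest on a GICv3 host: SGIs carry the source vCPU instead */
    static uint64_t example_lr_v2_compat(uint32_t irq, uint8_t source)
    {
        return (uint64_t)irq | ((uint64_t)source << EX_LR_CPUID_SHIFT) |
               EX_ICH_LR_PENDING;
    }

    int main(void)
    {
        printf("v3 LR:        0x%016llx\n",
               (unsigned long long)example_lr_v3(27));
        printf("v2-compat LR: 0x%016llx\n",
               (unsigned long long)example_lr_v2_compat(5, 2));
        return 0;
    }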
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic-v3.c | 138 ++++++++++++++++++++++++++++++++++++++----------
virt/kvm/arm/vgic.c | 2 +
2 files changed, 112 insertions(+), 28 deletions(-)
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 7d9c85e..d26d12f 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -34,6 +34,7 @@
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
/*
* LRs are stored in reverse order in memory. make sure we index them
@@ -43,7 +44,35 @@
static u32 ich_vtr_el2;
-static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+static u64 sync_lr_val(u8 state)
+{
+ u64 lr_val = 0;
+
+ if (state & LR_STATE_PENDING)
+ lr_val |= ICH_LR_PENDING_BIT;
+ if (state & LR_STATE_ACTIVE)
+ lr_val |= ICH_LR_ACTIVE_BIT;
+ if (state & LR_EOI_INT)
+ lr_val |= ICH_LR_EOI;
+
+ return lr_val;
+}
+
+static u8 sync_lr_state(u64 lr_val)
+{
+ u8 state = 0;
+
+ if (lr_val & ICH_LR_PENDING_BIT)
+ state |= LR_STATE_PENDING;
+ if (lr_val & ICH_LR_ACTIVE_BIT)
+ state |= LR_STATE_ACTIVE;
+ if (lr_val & ICH_LR_EOI)
+ state |= LR_EOI_INT;
+
+ return state;
+}
+
+static struct vgic_lr vgic_v2_on_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
struct vgic_lr lr_desc;
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
@@ -53,30 +82,53 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
else
lr_desc.source = 0;
- lr_desc.state = 0;
+ lr_desc.state = sync_lr_state(val);
- if (val & ICH_LR_PENDING_BIT)
- lr_desc.state |= LR_STATE_PENDING;
- if (val & ICH_LR_ACTIVE_BIT)
- lr_desc.state |= LR_STATE_ACTIVE;
- if (val & ICH_LR_EOI)
- lr_desc.state |= LR_EOI_INT;
+ return lr_desc;
+}
+
+static struct vgic_lr vgic_v3_on_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+ struct vgic_lr lr_desc;
+ u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
+
+ lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
+ lr_desc.source = 0;
+ lr_desc.state = sync_lr_state(val);
return lr_desc;
}
-static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
+static void vgic_v3_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
{
- u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
- lr_desc.irq);
+ u64 lr_val;
- if (lr_desc.state & LR_STATE_PENDING)
- lr_val |= ICH_LR_PENDING_BIT;
- if (lr_desc.state & LR_STATE_ACTIVE)
- lr_val |= ICH_LR_ACTIVE_BIT;
- if (lr_desc.state & LR_EOI_INT)
- lr_val |= ICH_LR_EOI;
+ lr_val = lr_desc.irq;
+
+ /*
+ * currently all guest IRQs are Group1, as Group0 would result
+ * in a FIQ in the guest, which it wouldn't expect.
+ * Eventually we want to make this configurable, so we may revisit
+ * this in the future.
+ */
+ lr_val |= ICH_LR_GROUP;
+
+ lr_val |= sync_lr_val(lr_desc.state);
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
+}
+
+static void vgic_v2_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
+{
+ u64 lr_val;
+
+ lr_val = lr_desc.irq;
+
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+
+ lr_val |= sync_lr_val(lr_desc.state);
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
}
@@ -145,9 +197,8 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *vgic_v3;
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
- vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
@@ -155,7 +206,14 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
*/
vgic_v3->vgic_vmcr = 0;
- vgic_v3->vgic_sre = 0;
+ /*
+ * Set the SRE_EL1 value depending on the configured
+ * emulated vGIC model.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ else
+ vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
@@ -173,6 +231,15 @@ static const struct vgic_ops vgic_v3_ops = {
.enable = vgic_v3_enable,
};
+static void init_vgic_v3_emul(struct kvm *kvm)
+{
+ struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+
+ vm_ops->get_lr = vgic_v3_on_v3_get_lr;
+ vm_ops->set_lr = vgic_v3_on_v3_set_lr;
+ kvm->arch.max_vcpus = KVM_MAX_VCPUS;
+}
+
static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
{
struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
@@ -184,14 +251,28 @@ static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
if (nr_vcpus > 8)
return false;
- vm_ops->get_lr = vgic_v3_get_lr;
- vm_ops->set_lr = vgic_v3_set_lr;
+ vm_ops->get_lr = vgic_v2_on_v3_get_lr;
+ vm_ops->set_lr = vgic_v2_on_v3_set_lr;
kvm->arch.max_vcpus = 8;
return true;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ init_vgic_v3_emul(kvm);
+ return true;
}
return false;
}
+static bool vgic_v3_init_emul(struct kvm *kvm, int type)
+{
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ init_vgic_v3_emul(kvm);
+ return true;
+ }
+
+ return false;
+}
+
static struct vgic_params vgic_v3_params;
/**
@@ -233,12 +314,13 @@ int vgic_v3_probe(struct device_node *vgic_node,
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
- kvm_err("Cannot obtain GICV region\n");
- ret = -ENXIO;
- goto out;
+ kvm_info("GICv3: GICv2 emulation not available\n");
+ vgic->vcpu_base = 0;
+ vgic->init_emul = vgic_v3_init_emul;
+ } else {
+ vgic->vcpu_base = vcpu_res.start;
+ vgic->init_emul = vgic_v3_init_emul_compat;
}
- vgic->init_emul = vgic_v3_init_emul_compat;
- vgic->vcpu_base = vcpu_res.start;
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index fa150c7..8a584e0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1424,6 +1424,8 @@ static bool init_emulation_ops(struct kvm *kvm, int type)
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
return vgic_v2_init_emulation_ops(kvm, type);
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ return vgic_v3_init_emulation_ops(kvm, type);
}
return false;
}
--
1.7.9.5
^ permalink raw reply related [flat|nested] 25+ messages in thread
* [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation
2014-06-19 9:45 ` [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation Andre Przywara
@ 2014-06-19 21:43 ` Chalamarla, Tirumalesh
2014-06-20 8:46 ` Andre Przywara
0 siblings, 1 reply; 25+ messages in thread
From: Chalamarla, Tirumalesh @ 2014-06-19 21:43 UTC (permalink / raw)
To: linux-arm-kernel
-----Original Message-----
From: kvmarm-bounces@lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
Sent: Thursday, June 19, 2014 2:46 AM
To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
Cc: christoffer.dall at linaro.org
Subject: [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation
With all the necessary GICv3 emulation code in place, we can now connect the code to the GICv3 backend in the kernel.
The LR register handling is different depending on the emulated GIC model, so provide different implementations for each.
Also allow non-v2-compatible GICv3 implementations (which don't provide MMIO regions for the virtual CPU interface in the DT), but restrict those hosts to use GICv3 guests only.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
virt/kvm/arm/vgic-v3.c | 138 ++++++++++++++++++++++++++++++++++++++----------
virt/kvm/arm/vgic.c | 2 +
2 files changed, 112 insertions(+), 28 deletions(-)
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 7d9c85e..d26d12f 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -34,6 +34,7 @@
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
/*
* LRs are stored in reverse order in memory. make sure we index them @@ -43,7 +44,35 @@
static u32 ich_vtr_el2;
-static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+static u64 sync_lr_val(u8 state)
+{
+ u64 lr_val = 0;
+
+ if (state & LR_STATE_PENDING)
+ lr_val |= ICH_LR_PENDING_BIT;
+ if (state & LR_STATE_ACTIVE)
+ lr_val |= ICH_LR_ACTIVE_BIT;
+ if (state & LR_EOI_INT)
+ lr_val |= ICH_LR_EOI;
+
+ return lr_val;
+}
+
+static u8 sync_lr_state(u64 lr_val)
+{
+ u8 state = 0;
+
+ if (lr_val & ICH_LR_PENDING_BIT)
+ state |= LR_STATE_PENDING;
+ if (lr_val & ICH_LR_ACTIVE_BIT)
+ state |= LR_STATE_ACTIVE;
+ if (lr_val & ICH_LR_EOI)
+ state |= LR_EOI_INT;
+
+ return state;
+}
+
+static struct vgic_lr vgic_v2_on_v3_get_lr(const struct kvm_vcpu *vcpu,
+int lr)
{
struct vgic_lr lr_desc;
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
@@ -53,30 +82,53 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
else
lr_desc.source = 0;
- lr_desc.state = 0;
+ lr_desc.state = sync_lr_state(val);
- if (val & ICH_LR_PENDING_BIT)
- lr_desc.state |= LR_STATE_PENDING;
- if (val & ICH_LR_ACTIVE_BIT)
- lr_desc.state |= LR_STATE_ACTIVE;
- if (val & ICH_LR_EOI)
- lr_desc.state |= LR_EOI_INT;
+ return lr_desc;
+}
+
+static struct vgic_lr vgic_v3_on_v3_get_lr(const struct kvm_vcpu *vcpu,
+int lr)
+{
+ struct vgic_lr lr_desc;
+ u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
+
+ lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
+ lr_desc.source = 0;
+ lr_desc.state = sync_lr_state(val);
return lr_desc;
}
-static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
+static void vgic_v3_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
{
- u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
- lr_desc.irq);
+ u64 lr_val;
- if (lr_desc.state & LR_STATE_PENDING)
- lr_val |= ICH_LR_PENDING_BIT;
- if (lr_desc.state & LR_STATE_ACTIVE)
- lr_val |= ICH_LR_ACTIVE_BIT;
- if (lr_desc.state & LR_EOI_INT)
- lr_val |= ICH_LR_EOI;
+ lr_val = lr_desc.irq;
+
+ /*
+ * currently all guest IRQs are Group1, as Group0 would result
+ * in a FIQ in the guest, which it wouldn't expect.
+ * Eventually we want to make this configurable, so we may revisit
+ * this in the future.
+ */
+ lr_val |= ICH_LR_GROUP;
+
+ lr_val |= sync_lr_val(lr_desc.state);
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
+}
+
+static void vgic_v2_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
+{
+ u64 lr_val;
+
+ lr_val = lr_desc.irq;
+
+ lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+
+ lr_val |= sync_lr_val(lr_desc.state);
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
}
@@ -145,9 +197,8 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *vgic_v3;
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
- vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
@@ -155,7 +206,14 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
*/
vgic_v3->vgic_vmcr = 0;
- vgic_v3->vgic_sre = 0;
+ /*
+ * Set the SRE_EL1 value depending on the configured
+ * emulated vGIC model.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ else
+ vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
@@ -173,6 +231,15 @@ static const struct vgic_ops vgic_v3_ops = {
.enable = vgic_v3_enable,
};
+static void init_vgic_v3_emul(struct kvm *kvm)
+{
+ struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
+
+ vm_ops->get_lr = vgic_v3_on_v3_get_lr;
+ vm_ops->set_lr = vgic_v3_on_v3_set_lr;
+ kvm->arch.max_vcpus = KVM_MAX_VCPUS;
+}
+
static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
{
struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
@@ -184,14 +251,28 @@ static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
if (nr_vcpus > 8)
return false;
- vm_ops->get_lr = vgic_v3_get_lr;
- vm_ops->set_lr = vgic_v3_set_lr;
+ vm_ops->get_lr = vgic_v2_on_v3_get_lr;
+ vm_ops->set_lr = vgic_v2_on_v3_set_lr;
kvm->arch.max_vcpus = 8;
return true;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ init_vgic_v3_emul(kvm);
+ return true;
}
return false;
}
+static bool vgic_v3_init_emul(struct kvm *kvm, int type)
+{
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ init_vgic_v3_emul(kvm);
+ return true;
+ }
+
+ return false;
+}
+
static struct vgic_params vgic_v3_params;
/**
@@ -233,12 +314,13 @@ int vgic_v3_probe(struct device_node *vgic_node,
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
- kvm_err("Cannot obtain GICV region\n");
- ret = -ENXIO;
- goto out;
+ kvm_info("GICv3: GICv2 emulation not available\n");
+ vgic->vcpu_base = 0;
+ vgic->init_emul = vgic_v3_init_emul;
+ } else {
+ vgic->vcpu_base = vcpu_res.start;
+ vgic->init_emul = vgic_v3_init_emul_compat;
}
Are we returning an error when GICv2 emulation is requested from user space but is not supported? I might have missed the code.
- vgic->init_emul = vgic_v3_init_emul_compat;
- vgic->vcpu_base = vcpu_res.start;
vgic->vctrl_base = NULL;
vgic->type = VGIC_V3;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index fa150c7..8a584e0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1424,6 +1424,8 @@ static bool init_emulation_ops(struct kvm *kvm, int type)
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
return vgic_v2_init_emulation_ops(kvm, type);
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ return vgic_v3_init_emulation_ops(kvm, type);
}
return false;
}
--
1.7.9.5
_______________________________________________
kvmarm mailing list
kvmarm at lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 25+ messages in thread
* [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation
2014-06-19 21:43 ` Chalamarla, Tirumalesh
@ 2014-06-20 8:46 ` Andre Przywara
0 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-20 8:46 UTC (permalink / raw)
To: linux-arm-kernel
On 19/06/14 22:43, Chalamarla, Tirumalesh wrote:
>
>
> -----Original Message-----
> From: kvmarm-bounces at lists.cs.columbia.edu [mailto:kvmarm-bounces at lists.cs.columbia.edu] On Behalf Of Andre Przywara
> Sent: Thursday, June 19, 2014 2:46 AM
> To: linux-arm-kernel at lists.infradead.org; kvmarm at lists.cs.columbia.edu; kvm at vger.kernel.org
> Cc: christoffer.dall at linaro.org
> Subject: [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation
>
> With all the necessary GICv3 emulation code in place, we can now connect the code to the GICv3 backend in the kernel.
> The LR register handling is different depending on the emulated GIC model, so provide different implementations for each.
> Also allow non-v2-compatible GICv3 implementations (which don't provide MMIO regions for the virtual CPU interface in the DT), but restrict those hosts to use GICv3 guests only.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
> virt/kvm/arm/vgic-v3.c | 138 ++++++++++++++++++++++++++++++++++++++----------
> virt/kvm/arm/vgic.c | 2 +
> 2 files changed, 112 insertions(+), 28 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 7d9c85e..d26d12f 100644
> --- a/virt/kvm/arm/vgic-v3.c
> +++ b/virt/kvm/arm/vgic-v3.c
> @@ -34,6 +34,7 @@
> #define GICH_LR_VIRTUALID (0x3ffUL << 0)
> #define GICH_LR_PHYSID_CPUID_SHIFT (10)
> #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
> +#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
>
> /*
> * LRs are stored in reverse order in memory. make sure we index them @@ -43,7 +44,35 @@
>
> static u32 ich_vtr_el2;
>
> -static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
> +static u64 sync_lr_val(u8 state)
> +{
> + u64 lr_val = 0;
> +
> + if (state & LR_STATE_PENDING)
> + lr_val |= ICH_LR_PENDING_BIT;
> + if (state & LR_STATE_ACTIVE)
> + lr_val |= ICH_LR_ACTIVE_BIT;
> + if (state & LR_EOI_INT)
> + lr_val |= ICH_LR_EOI;
> +
> + return lr_val;
> +}
> +
> +static u8 sync_lr_state(u64 lr_val)
> +{
> + u8 state = 0;
> +
> + if (lr_val & ICH_LR_PENDING_BIT)
> + state |= LR_STATE_PENDING;
> + if (lr_val & ICH_LR_ACTIVE_BIT)
> + state |= LR_STATE_ACTIVE;
> + if (lr_val & ICH_LR_EOI)
> + state |= LR_EOI_INT;
> +
> + return state;
> +}
> +
> +static struct vgic_lr vgic_v2_on_v3_get_lr(const struct kvm_vcpu *vcpu,
> +int lr)
> {
> struct vgic_lr lr_desc;
> u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
> @@ -53,30 +82,53 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
> lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
> else
> lr_desc.source = 0;
> - lr_desc.state = 0;
> + lr_desc.state = sync_lr_state(val);
>
> - if (val & ICH_LR_PENDING_BIT)
> - lr_desc.state |= LR_STATE_PENDING;
> - if (val & ICH_LR_ACTIVE_BIT)
> - lr_desc.state |= LR_STATE_ACTIVE;
> - if (val & ICH_LR_EOI)
> - lr_desc.state |= LR_EOI_INT;
> + return lr_desc;
> +}
> +
> +static struct vgic_lr vgic_v3_on_v3_get_lr(const struct kvm_vcpu *vcpu,
> +int lr)
> +{
> + struct vgic_lr lr_desc;
> + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
> +
> + lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
> + lr_desc.source = 0;
> + lr_desc.state = sync_lr_state(val);
>
> return lr_desc;
> }
>
> -static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
> - struct vgic_lr lr_desc)
> +static void vgic_v3_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr lr_desc)
> {
> - u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
> - lr_desc.irq);
> + u64 lr_val;
>
> - if (lr_desc.state & LR_STATE_PENDING)
> - lr_val |= ICH_LR_PENDING_BIT;
> - if (lr_desc.state & LR_STATE_ACTIVE)
> - lr_val |= ICH_LR_ACTIVE_BIT;
> - if (lr_desc.state & LR_EOI_INT)
> - lr_val |= ICH_LR_EOI;
> + lr_val = lr_desc.irq;
> +
> + /*
> + * currently all guest IRQs are Group1, as Group0 would result
> + * in a FIQ in the guest, which it wouldn't expect.
> + * Eventually we want to make this configurable, so we may revisit
> + * this in the future.
> + */
> + lr_val |= ICH_LR_GROUP;
> +
> + lr_val |= sync_lr_val(lr_desc.state);
> +
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
> +}
> +
> +static void vgic_v2_on_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr lr_desc)
> +{
> + u64 lr_val;
> +
> + lr_val = lr_desc.irq;
> +
> + lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
> +
> + lr_val |= sync_lr_val(lr_desc.state);
>
> vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
> }
> @@ -145,9 +197,8 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>
> static void vgic_v3_enable(struct kvm_vcpu *vcpu)
> {
> - struct vgic_v3_cpu_if *vgic_v3;
> + struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
>
> - vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
> /*
> * By forcing VMCR to zero, the GIC will restore the binary
> * points to their reset values. Anything else resets to zero
> @@ -155,7 +206,14 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
> */
> vgic_v3->vgic_vmcr = 0;
>
> - vgic_v3->vgic_sre = 0;
> + /*
> + * Set the SRE_EL1 value depending on the configured
> + * emulated vGIC model.
> + */
> + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
> + vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
> + else
> + vgic_v3->vgic_sre = 0;
>
> /* Get the show on the road... */
> vgic_v3->vgic_hcr = ICH_HCR_EN;
> @@ -173,6 +231,15 @@ static const struct vgic_ops vgic_v3_ops = {
> .enable = vgic_v3_enable,
> };
>
> +static void init_vgic_v3_emul(struct kvm *kvm)
> +{
> + struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
> +
> + vm_ops->get_lr = vgic_v3_on_v3_get_lr;
> + vm_ops->set_lr = vgic_v3_on_v3_set_lr;
> + kvm->arch.max_vcpus = KVM_MAX_VCPUS;
> +}
> +
> static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
> {
> struct vgic_vm_ops *vm_ops = &kvm->arch.vgic.vm_ops;
> @@ -184,14 +251,28 @@ static bool vgic_v3_init_emul_compat(struct kvm *kvm, int type)
> if (nr_vcpus > 8)
> return false;
>
> - vm_ops->get_lr = vgic_v3_get_lr;
> - vm_ops->set_lr = vgic_v3_set_lr;
> + vm_ops->get_lr = vgic_v2_on_v3_get_lr;
> + vm_ops->set_lr = vgic_v2_on_v3_set_lr;
> kvm->arch.max_vcpus = 8;
> return true;
> + case KVM_DEV_TYPE_ARM_VGIC_V3:
> + init_vgic_v3_emul(kvm);
> + return true;
> }
> return false;
> }
>
> +static bool vgic_v3_init_emul(struct kvm *kvm, int type)
> +{
> + switch (type) {
> + case KVM_DEV_TYPE_ARM_VGIC_V3:
> + init_vgic_v3_emul(kvm);
> + return true;
> + }
> +
> + return false;
> +}
> +
> static struct vgic_params vgic_v3_params;
>
> /**
> @@ -233,12 +314,13 @@ int vgic_v3_probe(struct device_node *vgic_node,
>
> gicv_idx += 3; /* Also skip GICD, GICC, GICH */
> if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
> - kvm_err("Cannot obtain GICV region\n");
> - ret = -ENXIO;
> - goto out;
> + kvm_info("GICv3: GICv2 emulation not available\n");
> + vgic->vcpu_base = 0;
> + vgic->init_emul = vgic_v3_init_emul;
> + } else {
> + vgic->vcpu_base = vcpu_res.start;
> + vgic->init_emul = vgic_v3_init_emul_compat;
> }
>
> Are we returning an error when GICv2 emulation is requested from user space but is not supported? I might have missed the code.
Yes, we do. Actually it is right here, with the different init_emul()
functions. Check their implementations above: we only allow
KVM_DEV_TYPE_ARM_VGIC_V3 for the first case here.
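To illustrate how the restriction surfaces to userland (a sketch, not
code from this series; vm_fd is assumed to be an open VM file
descriptor):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Sketch only: the restriction shows up as a failing
     * KVM_CREATE_DEVICE ioctl at device-creation time.
     */
    static int try_create_vgic_v2(int vm_fd)
    {
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };

        /* returns < 0 on a host that cannot emulate a GICv2 guest */
        return ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
    }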
Cheers,
Andre.
>
>
> - vgic->init_emul = vgic_v3_init_emul_compat;
> - vgic->vcpu_base = vcpu_res.start;
> vgic->vctrl_base = NULL;
> vgic->type = VGIC_V3;
>
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index fa150c7..8a584e0 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1424,6 +1424,8 @@ static bool init_emulation_ops(struct kvm *kvm, int type)
> switch (type) {
> case KVM_DEV_TYPE_ARM_VGIC_V2:
> return vgic_v2_init_emulation_ops(kvm, type);
> + case KVM_DEV_TYPE_ARM_VGIC_V3:
> + return vgic_v3_init_emulation_ops(kvm, type);
> }
> return false;
> }
> --
> 1.7.9.5
>
> _______________________________________________
> kvmarm mailing list
> kvmarm at lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
>
^ permalink raw reply [flat|nested] 25+ messages in thread
* [PATCH 14/14] arm/arm64: KVM: allow userland to request a virtual GICv3
2014-06-19 9:45 [PATCH 00/14] KVM GICv3 emulation Andre Przywara
` (12 preceding siblings ...)
2014-06-19 9:45 ` [PATCH 13/14] arm/arm64: KVM: enable kernel side of GICv3 emulation Andre Przywara
@ 2014-06-19 9:45 ` Andre Przywara
[not found] ` <1403228344858.49974@caviumnetworks.com>
14 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-19 9:45 UTC (permalink / raw)
To: linux-arm-kernel
With everything in place we allow userland to request that the kernel
use a virtual GICv3 in the guest, which finally lifts the 8 vCPU
limit for a guest.
Also we provide the necessary support for userland to set the memory
addresses for the virtual distributor and redistributors.
This requires some userland code to make use of that feature and
explicitly ask for a virtual GICv3.
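Roughly, the userland flow this enables looks like the sketch below;
an illustration with made-up base addresses (which must fit the guest
memory map) and abbreviated error handling, not the actual
kvmtool/qemu code:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Sketch only: create a vGICv3 and place it in the guest physical
     * address space. The base addresses are arbitrary example values.
     */
    static int create_vgic_v3(int vm_fd)
    {
        uint64_t dist_base = 0x2f000000;    /* example value */
        uint64_t redist_base = 0x2f100000;  /* example value */
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
        struct kvm_device_attr attr = { .group = KVM_DEV_ARM_VGIC_GRP_ADDR };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
            return -1;

        attr.attr = KVM_VGIC_V3_ADDR_TYPE_DIST;
        attr.addr = (uint64_t)(unsigned long)&dist_base;
        if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
            return -1;

        attr.attr = KVM_VGIC_V3_ADDR_TYPE_REDIST;
        attr.addr = (uint64_t)(unsigned long)&redist_base;
        return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
    }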
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
arch/arm64/include/uapi/asm/kvm.h | 7 ++++++
include/kvm/arm_vgic.h | 4 ++--
virt/kvm/arm/vgic-v3-emul.c | 3 +++
virt/kvm/arm/vgic.c | 46 ++++++++++++++++++++++++++-----------
4 files changed, 45 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 5513de4..9a62081 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -77,6 +77,13 @@ struct kvm_regs {
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
+/* Supported VGICv3 address types */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+
+#define KVM_VGIC_V3_DIST_SIZE SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 3b164ee..82e00a5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,8 +35,8 @@
#define VGIC_MAX_IRQS 1024
/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 8)
-#error Invalid number of CPU interfaces
+#if (KVM_MAX_VCPUS > 255)
+#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif
#if (VGIC_NR_IRQS_LEGACY & 31)
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index 68821fd..88c2cca 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -873,6 +873,9 @@ static int vgic_v3_has_attr(struct kvm_device *dev,
case KVM_VGIC_V2_ADDR_TYPE_DIST:
case KVM_VGIC_V2_ADDR_TYPE_CPU:
return -ENXIO;
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ return 0;
}
break;
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8a584e0..e3c7189 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,7 +1526,7 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
/**
* kvm_vgic_addr - set or get vgic VM base addresses
* @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
+ * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
* @addr: pointer to address value
* @write: if true set the address in the VM address space, if false read the
* address
@@ -1540,29 +1540,49 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
int r = 0;
struct vgic_dist *vgic = &kvm->arch.vgic;
+ int type_needed;
+ phys_addr_t *addr_ptr, block_size;
mutex_lock(&kvm->lock);
switch (type) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
- *addr, KVM_VGIC_V2_DIST_SIZE);
- } else {
- *addr = vgic->vgic_dist_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V2_DIST_SIZE;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
- if (write) {
- r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
- *addr, KVM_VGIC_V2_CPU_SIZE);
- } else {
- *addr = vgic->vgic_cpu_base;
- }
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_cpu_base;
+ block_size = KVM_VGIC_V2_CPU_SIZE;
break;
+#ifdef CONFIG_ARM_GIC_V3
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_dist_base;
+ block_size = KVM_VGIC_V3_DIST_SIZE;
+ break;
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_redist_base;
+ block_size = KVM_VGIC_V3_REDIST_SIZE;
+ break;
+#endif
default:
r = -ENODEV;
+ goto out;
+ }
+
+ if (vgic->vgic_model != type_needed) {
+ r = -ENODEV;
+ goto out;
}
+ if (write)
+ r = vgic_ioaddr_assign(kvm, addr_ptr, *addr, block_size);
+ else
+ *addr = *addr_ptr;
+
+out:
mutex_unlock(&kvm->lock);
return r;
}
--
1.7.9.5
^ permalink raw reply related [flat|nested] 25+ messages in thread
[parent not found: <1403228344858.49974@caviumnetworks.com>]
* [PATCH 00/14] KVM GICv3 emulation
[not found] ` <1403228344858.49974@caviumnetworks.com>
@ 2014-06-20 8:20 ` Andre Przywara
0 siblings, 0 replies; 25+ messages in thread
From: Andre Przywara @ 2014-06-20 8:20 UTC (permalink / raw)
To: linux-arm-kernel
On 20/06/14 02:39, Chalamarla, Tirumalesh wrote:
Hi Tirumalesh,
> Is there a public repo where we can get these patches from easily?
Indeed, many thanks to Marc, who now hosts my patches on his kernel.org
repo. Simply check out the gicv3/kvm-guest branch from
git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git
to get all of the GICv3 host, vgic-dyn and GICv3 guest emulation stuff
at once.
Regards,
Andre.
> ________________________________________
> ________________________________________
> From: kvmarm-bounces@lists.cs.columbia.edu <kvmarm-bounces@lists.cs.columbia.edu> on behalf of Andre Przywara <andre.przywara@arm.com>
> Sent: Thursday, June 19, 2014 3:15 PM
> To: linux-arm-kernel@lists.infradead.org; kvmarm@lists.cs.columbia.edu; kvm@vger.kernel.org
> Cc: christoffer.dall@linaro.org
> Subject: [PATCH 00/14] KVM GICv3 emulation
>
> GICv3 is the ARM generic interrupt controller designed to overcome
> some limits of the prevalent GICv2. Most notably it lifts the 8-CPU
> limit. Though Marc's recent patches add support for hosts to use a
> GICv3, the CPU limitation still applies to KVM guests, since the
> current code emulates a GICv2 only.
> Also, since GICv2 backward compatibility is optional in GICv3, a
> number of systems won't be able to run GICv2 guests.
>
> This patch series provides code to emulate a GICv3 distributor and
> redistributor for any KVM guest. It requires a GICv3 in the host to
> work. With those patches one can run guests efficiently on any GICv3
> host. It has the following features:
> - Affinity routing (support for up to 255 VCPUs, more possible)
> - System registers (as opposed to MMIO access)
> - No ITS
> - No priority support (as in the GICv2 emulation)
> - No save / restore support so far (will be added soon)
>
> The first 10 patches actually refactor the current VGIC code to make
> room for a different VGIC model to be dropped in with Patch 11/14.
> The remaining patches connect the new model to the kernel backend and
> the userland facing code.
>
> The series goes on top of both Marc's GICv3 host support series as
> well as his vgic-dyn patches.
> The necessary patches for kvmtool to enable the guest's GICv3 will be
> posted here as well.
> The series was tested on the fast model, with I/O and interrupt-
> affinity shuffling in a Linux guest with a varying number of VCPUs.
>
> Please review and test.
> I would be grateful if people could also test for GICv2 regressions
> (i.e. on a GICv2 host with current kvmtool/qemu), as there has been
> quite a bit of refactoring on that front.
>
> Much of the code was inspired by Marc, so send all praises to him
> (while I take the blame).
>
> Cheers,
> Andre.
>
> Andre Przywara (14):
> arm/arm64: KVM: rework MPIDR assignment and add accessors
> arm/arm64: KVM: pass down user space provided GIC type into vGIC code
> arm/arm64: KVM: refactor vgic_handle_mmio() function
> arm/arm64: KVM: wrap 64 bit MMIO accesses with two 32 bit ones
> arm/arm64: KVM: introduce per-VM ops
> arm/arm64: KVM: make the maximum number of vCPUs a per-VM value
> arm/arm64: KVM: make the value of ICC_SRE_EL1 a per-VM variable
> arm/arm64: KVM: refactor MMIO accessors
> arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
> arm/arm64: KVM: add opaque private pointer to MMIO accessors
> arm/arm64: KVM: add virtual GICv3 distributor emulation
> arm/arm64: KVM: add SGI system register trapping
> arm/arm64: KVM: enable kernel side of GICv3 emulation
> arm/arm64: KVM: allow userland to request a virtual GICv3
>
> arch/arm/include/asm/kvm_emulate.h | 2 +-
> arch/arm/include/asm/kvm_host.h | 3 +
> arch/arm/kvm/Makefile | 1 +
> arch/arm/kvm/arm.c | 23 +-
> arch/arm/kvm/coproc.c | 19 +
> arch/arm/kvm/psci.c | 15 +-
> arch/arm64/include/asm/kvm_emulate.h | 3 +-
> arch/arm64/include/asm/kvm_host.h | 5 +
> arch/arm64/include/uapi/asm/kvm.h | 7 +
> arch/arm64/kernel/asm-offsets.c | 1 +
> arch/arm64/kvm/Makefile | 2 +
> arch/arm64/kvm/sys_regs.c | 37 +-
> arch/arm64/kvm/vgic-v3-switch.S | 14 +-
> include/kvm/arm_vgic.h | 38 +-
> include/linux/irqchip/arm-gic-v3.h | 26 +
> include/linux/kvm_host.h | 1 +
> include/uapi/linux/kvm.h | 1 +
> virt/kvm/arm/vgic-v2-emul.c | 802 +++++++++++++++++++++++++++
> virt/kvm/arm/vgic-v2.c | 22 +-
> virt/kvm/arm/vgic-v3-emul.c | 898 ++++++++++++++++++++++++++++++
> virt/kvm/arm/vgic-v3.c | 157 +++++-
> virt/kvm/arm/vgic.c | 1017 +++++++---------------------------
> virt/kvm/arm/vgic.h | 117 ++++
> virt/kvm/kvm_main.c | 3 +
> 24 files changed, 2346 insertions(+), 868 deletions(-)
> create mode 100644 virt/kvm/arm/vgic-v2-emul.c
> create mode 100644 virt/kvm/arm/vgic-v3-emul.c
> create mode 100644 virt/kvm/arm/vgic.h
>
> --
> 1.7.9.5
>
> _______________________________________________
> kvmarm mailing list
> kvmarm@lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
>
>