From: Christoffer Dall
Subject: [GIT PULL 07/51] arm64: KVM: allow export and import of generic timer regs
Date: Mon, 4 Aug 2014 10:46:24 +0200
Message-ID: <1407142028-31105-8-git-send-email-christoffer.dall@linaro.org>
In-Reply-To: <1407142028-31105-1-git-send-email-christoffer.dall@linaro.org>
References: <1407142028-31105-1-git-send-email-christoffer.dall@linaro.org>
To: Paolo Bonzini, Gleb Natapov
Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org, Alex Bennée, Christoffer Dall

From: Alex Bennée

For correct guest suspend/resume behaviour, we need to ensure we include
the generic timer registers for 64-bit guests. As CONFIG_KVM_ARM_TIMER is
always set for arm64, we don't need to worry about null implementations.
However, I have re-jigged the kvm_arm_timer_set/get_reg declarations to
be in the common include/kvm/arm_arch_timer.h header.

Acked-by: Marc Zyngier
Reviewed-by: Christoffer Dall
Signed-off-by: Alex Bennée
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_host.h |  3 --
 arch/arm/kvm/guest.c            | 10 ------
 arch/arm64/kvm/guest.c          | 68 ++++++++++++++++++++++++++++++++++++++++
 include/kvm/arm_arch_timer.h    | 14 +++++++++
 4 files changed, 81 insertions(+), 14 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 193ceaf..dc4e3ed 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -228,7 +228,4 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
-int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
-
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c..986e625 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -124,16 +124,6 @@ static bool is_timer_reg(u64 index)
 	return false;
 }
 
-int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
-{
-	return 0;
-}
-
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
-{
-	return 0;
-}
-
 #else
 
 #define NUM_TIMER_REGS 3
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31..8d1ec28 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
 }
 
 /**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+	switch (index) {
+	case KVM_REG_ARM_TIMER_CTL:
+	case KVM_REG_ARM_TIMER_CNT:
+	case KVM_REG_ARM_TIMER_CVAL:
+		return true;
+	}
+	return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+	int ret;
+
+	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+	if (ret != 0)
+		return ret;
+
+	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+
+	val = kvm_arm_timer_get_reg(vcpu, reg->id);
+	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
  *
  * This is for all registers.
  */
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
-	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
+	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+		+ NUM_TIMER_REGS;
 }
 
 /**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	unsigned int i;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+	int ret;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
 		if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		uindices++;
 	}
 
+	ret = copy_timer_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += NUM_TIMER_REGS;
+
 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 }
 
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return get_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
 }
 
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return set_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
 }
 
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6d9aedd..ad9db60 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -67,6 +67,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
 #else
 static inline int kvm_timer_hyp_init(void)
 {
@@ -84,6 +88,16 @@ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+	return 0;
+}
+
+static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+	return 0;
+}
#endif
 
 #endif
-- 
2.0.0
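[Editorial note, not part of the patch] With this change applied, a userspace
VMM can save and restore the three generic timer registers on arm64 through
the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, the same way it
already handles core and system registers. The sketch below is illustrative
only and is not taken from the patch: it assumes an already-created vcpu file
descriptor "vcpu_fd" and keeps error handling minimal.

/*
 * Illustrative userspace sketch (assumption: vcpu_fd is a valid KVM vcpu
 * file descriptor): save and restore the guest's generic timer state
 * around a suspend/resume cycle via KVM_GET_ONE_REG / KVM_SET_ONE_REG.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int timer_reg_access(int vcpu_fd, unsigned long req,
			    uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uintptr_t)val,	/* kernel copies to/from this address */
	};

	/* req is either KVM_GET_ONE_REG or KVM_SET_ONE_REG */
	return ioctl(vcpu_fd, req, &reg);
}

/* Read CTL, CNT and CVAL into state[0..2] before suspending the guest. */
static int save_timer_state(int vcpu_fd, uint64_t state[3])
{
	if (timer_reg_access(vcpu_fd, KVM_GET_ONE_REG,
			     KVM_REG_ARM_TIMER_CTL, &state[0]) ||
	    timer_reg_access(vcpu_fd, KVM_GET_ONE_REG,
			     KVM_REG_ARM_TIMER_CNT, &state[1]) ||
	    timer_reg_access(vcpu_fd, KVM_GET_ONE_REG,
			     KVM_REG_ARM_TIMER_CVAL, &state[2]))
		return -1;
	return 0;
}

/* Write the saved values back before resuming the guest. */
static int restore_timer_state(int vcpu_fd, const uint64_t state[3])
{
	uint64_t vals[3] = { state[0], state[1], state[2] };

	if (timer_reg_access(vcpu_fd, KVM_SET_ONE_REG,
			     KVM_REG_ARM_TIMER_CTL, &vals[0]) ||
	    timer_reg_access(vcpu_fd, KVM_SET_ONE_REG,
			     KVM_REG_ARM_TIMER_CNT, &vals[1]) ||
	    timer_reg_access(vcpu_fd, KVM_SET_ONE_REG,
			     KVM_REG_ARM_TIMER_CVAL, &vals[2]))
		return -1;
	return 0;
}

Whether the counter (KVM_REG_ARM_TIMER_CNT) should be restored verbatim or
adjusted for host time that elapsed while the guest was suspended is a VMM
policy decision; the kernel side added by this patch only exposes the values.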