From: christoffer.dall@arm.com (Christoffer Dall)
To: linux-arm-kernel@lists.infradead.org
Subject: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface
Date: Mon, 6 Aug 2018 15:25:57 +0200 [thread overview]
Message-ID: <20180806132557.GD5985@e113682-lin.lund.arm.com> (raw)
In-Reply-To: <1529593060-542-15-git-send-email-Dave.Martin@arm.com>
On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> This patch adds the following registers for access via the
> KVM_{GET,SET}_ONE_REG interface:
>
> * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
> * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
> * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
>
> In order to adapt gracefully to future architectural extensions,
> the registers are divided up into slices as noted above: the i
> parameter denotes the slice index.
>
> For simplicity, bits or slices that exceed the maximum vector
> length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> read as zero for KVM_GET_ONE_REG.
>
> For the current architecture, only slice i = 0 is significant. The
> interface design allows i to increase to up to 31 in the future if
> required by future architectural amendments.
>
> The registers are only visible for vcpus that have SVE enabled.
> They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> have SVE. In all cases, surplus slices are not enumerated by
> KVM_GET_REG_LIST.
>
> Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> redirected to access the underlying vcpu SVE register storage as
> appropriate. In order to make this more straightforward, register
> accesses that straddle register boundaries are no longer guaranteed
> to succeed. (Support for such use was never deliberate, and
> userspace does not currently seem to be relying on it.)
Could you add documentation to Documentation/virtual/kvm/api.txt for
this as well, under the KVM_SET_ONE_REG definitions, explaining its use
on arm64?
Thanks,
-Christoffer
>
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> ---
> arch/arm64/include/uapi/asm/kvm.h | 10 ++
> arch/arm64/kvm/guest.c | 219 +++++++++++++++++++++++++++++++++++---
> 2 files changed, 216 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> index 4e76630..f54a9b0 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -213,6 +213,16 @@ struct kvm_arch_memory_slot {
> KVM_REG_ARM_FW | ((r) & 0xffff))
> #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
>
> +/* SVE registers */
> +#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
> +#define KVM_REG_ARM64_SVE_ZREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> + KVM_REG_SIZE_U2048 | \
> + ((n) << 5) | (i))
> +#define KVM_REG_ARM64_SVE_PREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> + KVM_REG_SIZE_U256 | \
> + ((n) << 5) | (i) | 0x400)
> +#define KVM_REG_ARM64_SVE_FFR(i) KVM_REG_ARM64_SVE_PREG(16, i)
> +
> /* Device Control API: ARM VGIC */
> #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
> #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index 4a9d77c..005394b 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -23,14 +23,19 @@
> #include <linux/err.h>
> #include <linux/kvm_host.h>
> #include <linux/module.h>
> +#include <linux/uaccess.h>
> #include <linux/vmalloc.h>
> #include <linux/fs.h>
> +#include <linux/stddef.h>
> #include <kvm/arm_psci.h>
> #include <asm/cputype.h>
> #include <linux/uaccess.h>
> +#include <asm/fpsimd.h>
> #include <asm/kvm.h>
> #include <asm/kvm_emulate.h>
> #include <asm/kvm_coproc.h>
> +#include <asm/kvm_host.h>
> +#include <asm/sigcontext.h>
>
> #include "trace.h"
>
> @@ -57,6 +62,106 @@ static u64 core_reg_offset_from_id(u64 id)
> return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
> }
>
> +static bool is_zreg(const struct kvm_one_reg *reg)
> +{
> + return reg->id >= KVM_REG_ARM64_SVE_ZREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS, 0x1f);
> +}
> +
> +static bool is_preg(const struct kvm_one_reg *reg)
> +{
> + return reg->id >= KVM_REG_ARM64_SVE_PREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_FFR(0x1f);
> +}
> +
> +static unsigned int sve_reg_num(const struct kvm_one_reg *reg)
> +{
> + return (reg->id >> 5) & 0x1f;
> +}
> +
> +static unsigned int sve_reg_index(const struct kvm_one_reg *reg)
> +{
> + return reg->id & 0x1f;
> +}
> +
> +struct reg_bounds_struct {
> + char *kptr;
> + size_t start_offset;
> + size_t copy_count;
> + size_t flush_count;
> +};
> +
> +static int copy_bounded_reg_to_user(void __user *uptr,
> + const struct reg_bounds_struct *b)
> +{
> + if (copy_to_user(uptr, b->kptr, b->copy_count) ||
> + clear_user((char __user *)uptr + b->copy_count, b->flush_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int copy_bounded_reg_from_user(const struct reg_bounds_struct *b,
> + const void __user *uptr)
> +{
> + if (copy_from_user(b->kptr, uptr, b->copy_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int fpsimd_vreg_bounds(struct reg_bounds_struct *b,
> + struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + const size_t stride = KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]) -
> + KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
> + const size_t start = KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
> + const size_t limit = KVM_REG_ARM_CORE_REG(fp_regs.vregs[32]);
> +
> + const u64 uoffset = core_reg_offset_from_id(reg->id);
> + size_t usize = KVM_REG_SIZE(reg->id);
> + size_t start_vreg, end_vreg;
> +
> + if (WARN_ON((reg->id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE))
> + return -ENOENT;
> +
> + if (usize % sizeof(u32))
> + return -EINVAL;
> +
> + usize /= sizeof(u32);
> +
> + if ((uoffset <= start && usize <= start - uoffset) ||
> + uoffset >= limit)
> + return -ENOENT; /* not a vreg */
> +
> + BUILD_BUG_ON(uoffset > limit);
> + if (uoffset < start || usize > limit - uoffset)
> + return -EINVAL; /* overlaps vregs[] bounds */
> +
> + start_vreg = (uoffset - start) / stride;
> + end_vreg = ((uoffset - start) + usize - 1) / stride;
> + if (start_vreg != end_vreg)
> + return -EINVAL; /* spans multiple vregs */
> +
> + b->start_offset = ((uoffset - start) % stride) * sizeof(u32);
> + b->copy_count = usize * sizeof(u32);
> + b->flush_count = 0;
> +
> + if (vcpu_has_sve(&vcpu->arch)) {
> + const unsigned int vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> +
> + b->kptr = vcpu->arch.sve_state;
> + b->kptr += (SVE_SIG_ZREG_OFFSET(vq, start_vreg) -
> + SVE_SIG_REGS_OFFSET);
> + } else {
> + b->kptr = (char *)&vcpu_gp_regs(vcpu)->fp_regs.vregs[
> + start_vreg];
> + }
> +
> + return 0;
> +}
> +
> static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> {
> /*
> @@ -65,11 +170,20 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> * array. Hence below, nr_regs is the number of entries, and
> * off the index in the "array".
> */
> + int err;
> + struct reg_bounds_struct b;
> __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
> struct kvm_regs *regs = vcpu_gp_regs(vcpu);
> int nr_regs = sizeof(*regs) / sizeof(__u32);
> u32 off;
>
> + err = fpsimd_vreg_bounds(&b, vcpu, reg);
> + switch (err) {
> + case 0: return copy_bounded_reg_to_user(uaddr, &b);
> + case -ENOENT: break; /* not an FPSIMD vreg */
> + default: return err;
> + }
> +
> /* Our ID is an index into the kvm_regs struct. */
> off = core_reg_offset_from_id(reg->id);
> if (off >= nr_regs ||
> @@ -84,14 +198,23 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>
> static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> {
> + int err;
> + struct reg_bounds_struct b;
> __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
> struct kvm_regs *regs = vcpu_gp_regs(vcpu);
> int nr_regs = sizeof(*regs) / sizeof(__u32);
> __uint128_t tmp;
> void *valp = &tmp;
> u64 off;
> - int err = 0;
>
> + err = fpsimd_vreg_bounds(&b, vcpu, reg);
> + switch (err) {
> + case 0: return copy_bounded_reg_from_user(&b, uaddr);
> + case -ENOENT: break; /* not an FPSIMD vreg */
> + default: return err;
> + }
> +
> + err = 0;
> /* Our ID is an index into the kvm_regs struct. */
> off = core_reg_offset_from_id(reg->id);
> if (off >= nr_regs ||
> @@ -130,6 +253,78 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> return err;
> }
>
> +static int sve_reg_bounds(struct reg_bounds_struct *b,
> + const struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + unsigned int n = sve_reg_num(reg);
> + unsigned int i = sve_reg_index(reg);
> + unsigned int vl = vcpu->arch.sve_max_vl;
> + unsigned int vq = sve_vq_from_vl(vl);
> + unsigned int start, copy_limit, limit;
> +
> + b->kptr = vcpu->arch.sve_state;
> + if (is_zreg(reg)) {
> + b->kptr += SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET;
> + start = i * 0x100;
> + limit = start + 0x100;
> + copy_limit = vl;
> + } else if (is_preg(reg)) {
> + b->kptr += SVE_SIG_PREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET;
> + start = i * 0x20;
> + limit = start + 0x20;
> + copy_limit = vl / 8;
> + } else {
> + WARN_ON(1);
> + start = 0;
> + copy_limit = limit = 0;
> + }
> +
> + b->kptr += start;
> +
> + if (copy_limit < start)
> + copy_limit = start;
> + else if (copy_limit > limit)
> + copy_limit = limit;
> +
> + b->copy_count = copy_limit - start;
> + b->flush_count = limit - copy_limit;
> +
> + return 0;
> +}
> +
> +static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> +{
> + int ret;
> + struct reg_bounds_struct b;
> + char __user *uptr = (char __user *)reg->addr;
> +
> + if (!vcpu_has_sve(&vcpu->arch))
> + return -ENOENT;
> +
> + ret = sve_reg_bounds(&b, vcpu, reg);
> + if (ret)
> + return ret;
> +
> + return copy_bounded_reg_to_user(uptr, &b);
> +}
> +
> +static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> +{
> + int ret;
> + struct reg_bounds_struct b;
> + char __user *uptr = (char __user *)reg->addr;
> +
> + if (!vcpu_has_sve(&vcpu->arch))
> + return -ENOENT;
> +
> + ret = sve_reg_bounds(&b, vcpu, reg);
> + if (ret)
> + return ret;
> +
> + return copy_bounded_reg_from_user(&b, uptr);
> +}
> +
> int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
> {
> return -EINVAL;
> @@ -251,12 +446,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
> return -EINVAL;
>
> - /* Register group 16 means we want a core register. */
> - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
> - return get_core_reg(vcpu, reg);
> -
> - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
> - return kvm_arm_get_fw_reg(vcpu, reg);
> + switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
> + case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
> + case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
> + case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
> + }
>
> if (is_timer_reg(reg->id))
> return get_timer_reg(vcpu, reg);
> @@ -270,12 +464,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
> if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
> return -EINVAL;
>
> - /* Register group 16 means we set a core register. */
> - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
> - return set_core_reg(vcpu, reg);
> -
> - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
> - return kvm_arm_set_fw_reg(vcpu, reg);
> + switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
> + case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
> + case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
> + case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
> + }
>
> if (is_timer_reg(reg->id))
> return set_timer_reg(vcpu, reg);
> --
> 2.1.4
>
> _______________________________________________
> kvmarm mailing list
> kvmarm at lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
next prev parent reply other threads:[~2018-08-06 13:25 UTC|newest]
Thread overview: 89+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-06-21 14:57 [RFC PATCH 00/16] KVM: arm64: Initial support for SVE guests Dave Martin
2018-06-21 14:57 ` [RFC PATCH 01/16] arm64: fpsimd: Always set TIF_FOREIGN_FPSTATE on task state flush Dave Martin
2018-07-06 9:07 ` Alex Bennée
2018-06-21 14:57 ` [RFC PATCH 02/16] KVM: arm64: Delete orphaned declaration for __fpsimd_enabled() Dave Martin
2018-07-06 9:08 ` Alex Bennée
2018-06-21 14:57 ` [RFC PATCH 03/16] KVM: arm64: Refactor kvm_arm_num_regs() for easier maintenance Dave Martin
2018-07-06 9:20 ` Alex Bennée
2018-06-21 14:57 ` [RFC PATCH 04/16] KVM: arm64: Add missing #include of <linux/bitmap.h> to kvm_host.h Dave Martin
2018-07-06 9:21 ` Alex Bennée
2018-06-21 14:57 ` [RFC PATCH 05/16] KVM: arm: Add arch init/uninit hooks Dave Martin
2018-07-06 10:02 ` Alex Bennée
2018-07-09 15:15 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 06/16] arm64/sve: Determine virtualisation-friendly vector lengths Dave Martin
2018-07-06 13:20 ` Marc Zyngier
2018-06-21 14:57 ` [RFC PATCH 07/16] arm64/sve: Enable SVE state tracking for non-task contexts Dave Martin
2018-07-25 13:58 ` Alex Bennée
2018-07-25 14:39 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 08/16] KVM: arm64: Support dynamically hideable system registers Dave Martin
2018-07-25 14:12 ` Alex Bennée
2018-07-25 14:36 ` Dave Martin
2018-07-25 15:41 ` Alex Bennée
2018-07-26 12:53 ` Dave Martin
2018-08-07 19:20 ` Christoffer Dall
2018-08-08 8:33 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 09/16] KVM: arm64: Allow ID registers to by dynamically read-as-zero Dave Martin
2018-07-25 15:46 ` Alex Bennée
2018-08-06 13:03 ` Christoffer Dall
2018-08-07 11:09 ` Dave Martin
2018-08-07 19:35 ` Christoffer Dall
2018-08-08 9:11 ` Dave Martin
2018-08-08 9:58 ` Christoffer Dall
2018-08-08 14:03 ` Peter Maydell
2018-08-09 10:19 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 10/16] KVM: arm64: Add a vcpu flag to control SVE visibility for the guest Dave Martin
2018-07-19 11:08 ` Andrew Jones
2018-07-25 11:41 ` Dave Martin
2018-07-25 13:43 ` Andrew Jones
2018-07-25 14:41 ` Dave Martin
2018-07-19 15:02 ` Andrew Jones
2018-07-25 11:48 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 11/16] KVM: arm64/sve: System register context switch and access support Dave Martin
2018-07-19 11:11 ` Andrew Jones
2018-07-25 11:45 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 12/16] KVM: arm64/sve: Context switch the SVE registers Dave Martin
2018-07-19 13:13 ` Andrew Jones
2018-07-25 11:50 ` Dave Martin
2018-07-25 13:57 ` Andrew Jones
2018-07-25 14:12 ` Dave Martin
2018-08-06 13:19 ` Christoffer Dall
2018-08-07 11:15 ` Dave Martin
2018-08-07 19:43 ` Christoffer Dall
2018-08-08 8:23 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 13/16] KVM: Allow 2048-bit register access via KVM_{GET, SET}_ONE_REG Dave Martin
2018-07-25 15:58 ` Alex Bennée
2018-07-26 12:58 ` Dave Martin
2018-07-26 13:55 ` Alex Bennée
2018-07-27 9:26 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface Dave Martin
2018-07-19 13:04 ` Andrew Jones
2018-07-25 14:06 ` Dave Martin
2018-07-25 17:20 ` Andrew Jones
2018-07-26 13:10 ` Dave Martin
2018-08-03 14:57 ` Dave Martin
2018-08-03 15:11 ` Andrew Jones
2018-08-03 15:38 ` Dave Martin
2018-08-06 13:25 ` Christoffer Dall [this message]
2018-08-07 11:17 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 15/16] KVM: arm64: Enumerate SVE register indices for KVM_GET_REG_LIST Dave Martin
2018-07-19 14:12 ` Andrew Jones
2018-07-25 14:50 ` Dave Martin
2018-06-21 14:57 ` [RFC PATCH 16/16] KVM: arm64/sve: Report and enable SVE API extensions for userspace Dave Martin
2018-07-19 14:59 ` Andrew Jones
2018-07-25 15:27 ` Dave Martin
2018-07-25 16:52 ` Andrew Jones
2018-07-26 13:18 ` Dave Martin
2018-08-06 13:41 ` Christoffer Dall
2018-08-07 11:23 ` Dave Martin
2018-08-07 20:08 ` Christoffer Dall
2018-08-08 8:30 ` Dave Martin
2018-07-19 15:24 ` Andrew Jones
2018-07-26 13:23 ` Dave Martin
2018-07-06 8:22 ` [RFC PATCH 00/16] KVM: arm64: Initial support for SVE guests Alex Bennée
2018-07-06 9:05 ` Dave Martin
2018-07-06 9:20 ` Alex Bennée
2018-07-06 9:23 ` Peter Maydell
2018-07-06 10:11 ` Alex Bennée
2018-07-06 10:14 ` Peter Maydell
2018-08-06 13:05 ` Christoffer Dall
2018-08-07 11:18 ` Dave Martin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180806132557.GD5985@e113682-lin.lund.arm.com \
--to=christoffer.dall@arm.com \
--cc=linux-arm-kernel@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).